diff --git a/build.zig b/build.zig index 5c7ee4063b..6487cc615d 100644 --- a/build.zig +++ b/build.zig @@ -437,8 +437,8 @@ pub fn build(b: *std.Build) !void { .skip_non_native = skip_non_native, .skip_libc = skip_libc, .use_llvm = use_llvm, - // 2262585344 was observed on an x86_64-linux-gnu host. - .max_rss = 2488843878, + // 2520100864 was observed on an x86_64-linux-gnu host. + .max_rss = 2772110950, })); test_modules_step.dependOn(tests.addModuleTests(b, .{ diff --git a/doc/langref/test_intCast_builtin.zig b/doc/langref/test_intCast_builtin.zig index cfd5b9c092..835ba48379 100644 --- a/doc/langref/test_intCast_builtin.zig +++ b/doc/langref/test_intCast_builtin.zig @@ -5,4 +5,4 @@ test "integer cast panic" { _ = b; } -// test_error=cast truncated bits +// test_error=integer does not fit in destination type diff --git a/lib/std/Target.zig b/lib/std/Target.zig index bf5a6369b5..b60de995fa 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -1246,11 +1246,7 @@ pub const Cpu = struct { /// Adds the specified feature set but not its dependencies. pub fn addFeatureSet(set: *Set, other_set: Set) void { - if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff) { - for (&set.ints, other_set.ints) |*set_int, other_set_int| set_int.* |= other_set_int; - } else { - set.ints = @as(@Vector(usize_count, usize), set.ints) | @as(@Vector(usize_count, usize), other_set.ints); - } + set.ints = @as(@Vector(usize_count, usize), set.ints) | @as(@Vector(usize_count, usize), other_set.ints); } /// Removes the specified feature but not its dependents. @@ -1262,11 +1258,7 @@ pub const Cpu = struct { /// Removes the specified feature but not its dependents. pub fn removeFeatureSet(set: *Set, other_set: Set) void { - if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff) { - for (&set.ints, other_set.ints) |*set_int, other_set_int| set_int.* &= ~other_set_int; - } else { - set.ints = @as(@Vector(usize_count, usize), set.ints) & ~@as(@Vector(usize_count, usize), other_set.ints); - } + set.ints = @as(@Vector(usize_count, usize), set.ints) & ~@as(@Vector(usize_count, usize), other_set.ints); } pub fn populateDependencies(set: *Set, all_features_list: []const Cpu.Feature) void { @@ -1295,17 +1287,10 @@ pub const Cpu = struct { } pub fn isSuperSetOf(set: Set, other_set: Set) bool { - if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff) { - var result = true; - for (&set.ints, other_set.ints) |*set_int, other_set_int| - result = result and (set_int.* & other_set_int) == other_set_int; - return result; - } else { - const V = @Vector(usize_count, usize); - const set_v: V = set.ints; - const other_v: V = other_set.ints; - return @reduce(.And, (set_v & other_v) == other_v); - } + const V = @Vector(usize_count, usize); + const set_v: V = set.ints; + const other_v: V = other_set.ints; + return @reduce(.And, (set_v & other_v) == other_v); } }; diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index b0b9e19169..ac9c70df8e 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -889,19 +889,10 @@ pub fn ArrayHashMapUnmanaged( self.pointer_stability.lock(); defer self.pointer_stability.unlock(); - if (new_capacity <= linear_scan_max) { - try self.entries.ensureTotalCapacity(gpa, new_capacity); - return; - } - - if (self.index_header) |header| { - if (new_capacity <= header.capacity()) { - try self.entries.ensureTotalCapacity(gpa, new_capacity); - return; - } - } - try self.entries.ensureTotalCapacity(gpa, 
new_capacity);
+        try self.entries.ensureTotalCapacity(gpa, new_capacity);
+        if (new_capacity <= linear_scan_max) return;
+        if (self.index_header) |header| if (new_capacity <= header.capacity()) return;
+
         const new_bit_index = try IndexHeader.findBitIndex(new_capacity);
         const new_header = try IndexHeader.alloc(gpa, new_bit_index);
@@ -2116,7 +2107,7 @@ const IndexHeader = struct {
     fn findBitIndex(desired_capacity: usize) Allocator.Error!u8 {
         if (desired_capacity > max_capacity) return error.OutOfMemory;
-        var new_bit_index = @as(u8, @intCast(std.math.log2_int_ceil(usize, desired_capacity)));
+        var new_bit_index: u8 = @intCast(std.math.log2_int_ceil(usize, desired_capacity));
         if (desired_capacity > index_capacities[new_bit_index]) new_bit_index += 1;
         if (new_bit_index < min_bit_index) new_bit_index = min_bit_index;
         assert(desired_capacity <= index_capacities[new_bit_index]);
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index 564df2933f..495bad3efc 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -499,15 +499,12 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
 
 fn ChaChaImpl(comptime rounds_nb: usize) type {
     switch (builtin.cpu.arch) {
         .x86_64 => {
-            const has_avx2 = std.Target.x86.featureSetHas(builtin.cpu.features, .avx2);
-            const has_avx512f = std.Target.x86.featureSetHas(builtin.cpu.features, .avx512f);
-            if (builtin.zig_backend != .stage2_x86_64 and has_avx512f) return ChaChaVecImpl(rounds_nb, 4);
-            if (has_avx2) return ChaChaVecImpl(rounds_nb, 2);
+            if (builtin.zig_backend != .stage2_x86_64 and std.Target.x86.featureSetHas(builtin.cpu.features, .avx512f)) return ChaChaVecImpl(rounds_nb, 4);
+            if (std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return ChaChaVecImpl(rounds_nb, 2);
             return ChaChaVecImpl(rounds_nb, 1);
         },
         .aarch64 => {
-            const has_neon = std.Target.aarch64.featureSetHas(builtin.cpu.features, .neon);
-            if (has_neon) return ChaChaVecImpl(rounds_nb, 4);
+            if (builtin.zig_backend != .stage2_aarch64 and std.Target.aarch64.featureSetHas(builtin.cpu.features, .neon)) return ChaChaVecImpl(rounds_nb, 4);
             return ChaChaNonVecImpl(rounds_nb);
         },
         else => return ChaChaNonVecImpl(rounds_nb),
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 527676566c..48b13a6b1a 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -78,13 +78,9 @@ pub fn FullPanic(comptime panicFn: fn ([]const u8, ?usize) noreturn) type {
             @branchHint(.cold);
             call("invalid error code", @returnAddress());
         }
-        pub fn castTruncatedData() noreturn {
+        pub fn integerOutOfBounds() noreturn {
             @branchHint(.cold);
-            call("integer cast truncated bits", @returnAddress());
-        }
-        pub fn negativeToUnsigned() noreturn {
-            @branchHint(.cold);
-            call("attempt to cast negative value to unsigned integer", @returnAddress());
+            call("integer does not fit in destination type", @returnAddress());
         }
         pub fn integerOverflow() noreturn {
             @branchHint(.cold);
@@ -126,8 +122,6 @@ pub fn FullPanic(comptime panicFn: fn ([]const u8, ?usize) noreturn) type {
             @branchHint(.cold);
             call("for loop over objects with non-equal lengths", @returnAddress());
         }
-        /// Delete after next zig1.wasm update
-        pub const memcpyLenMismatch = copyLenMismatch;
         pub fn copyLenMismatch() noreturn {
             @branchHint(.cold);
             call("source and destination arguments have non-equal lengths", @returnAddress());
diff --git a/lib/std/debug/no_panic.zig b/lib/std/debug/no_panic.zig
index 0a4996097a..f24317b9b7 100644
--- a/lib/std/debug/no_panic.zig
+++ b/lib/std/debug/no_panic.zig
@@ -65,12 +65,7 @@ pub fn invalidErrorCode() noreturn {
     @trap();
 }
 
-pub fn 
castTruncatedData() noreturn { - @branchHint(.cold); - @trap(); -} - -pub fn negativeToUnsigned() noreturn { +pub fn integerOutOfBounds() noreturn { @branchHint(.cold); @trap(); } @@ -125,9 +120,6 @@ pub fn forLenMismatch() noreturn { @trap(); } -/// Delete after next zig1.wasm update -pub const memcpyLenMismatch = copyLenMismatch; - pub fn copyLenMismatch() noreturn { @branchHint(.cold); @trap(); diff --git a/lib/std/debug/simple_panic.zig b/lib/std/debug/simple_panic.zig index 568f7de495..95f7d679ed 100644 --- a/lib/std/debug/simple_panic.zig +++ b/lib/std/debug/simple_panic.zig @@ -72,12 +72,8 @@ pub fn invalidErrorCode() noreturn { call("invalid error code", null); } -pub fn castTruncatedData() noreturn { - call("integer cast truncated bits", null); -} - -pub fn negativeToUnsigned() noreturn { - call("attempt to cast negative value to unsigned integer", null); +pub fn integerOutOfBounds() noreturn { + call("integer does not fit in destination type", null); } pub fn integerOverflow() noreturn { @@ -120,9 +116,6 @@ pub fn forLenMismatch() noreturn { call("for loop over objects with non-equal lengths", null); } -/// Delete after next zig1.wasm update -pub const memcpyLenMismatch = copyLenMismatch; - pub fn copyLenMismatch() noreturn { call("source and destination have non-equal lengths", null); } diff --git a/lib/std/hash/xxhash.zig b/lib/std/hash/xxhash.zig index f12f6341a4..b3128f39b2 100644 --- a/lib/std/hash/xxhash.zig +++ b/lib/std/hash/xxhash.zig @@ -780,7 +780,6 @@ fn testExpect(comptime H: type, seed: anytype, input: []const u8, expected: u64) } test "xxhash3" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23807 const H = XxHash3; @@ -814,7 +813,6 @@ test "xxhash3" { } test "xxhash3 smhasher" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23807 const Test = struct { @@ -828,7 +826,6 @@ test "xxhash3 smhasher" { } test "xxhash3 iterative api" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23807 const Test = struct { diff --git a/lib/std/simd.zig b/lib/std/simd.zig index 2c2b92e36e..12bf84a9e8 100644 --- a/lib/std/simd.zig +++ b/lib/std/simd.zig @@ -231,8 +231,6 @@ pub fn extract( } test "vector patterns" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - const base = @Vector(4, u32){ 10, 20, 30, 40 }; const other_base = @Vector(4, u32){ 55, 66, 77, 88 }; @@ -302,8 +300,6 @@ pub fn reverseOrder(vec: anytype) @TypeOf(vec) { } test "vector shifting" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - const base = @Vector(4, u32){ 10, 20, 30, 40 }; try std.testing.expectEqual([4]u32{ 30, 40, 999, 999 }, shiftElementsLeft(base, 2, 999)); @@ -368,9 +364,6 @@ pub fn countElementsWithValue(vec: anytype, value: std.meta.Child(@TypeOf(vec))) } test "vector searching" { - if (builtin.zig_backend == .stage2_x86_64 and - !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .ssse3)) return error.SkipZigTest; - const base = @Vector(8, u32){ 6, 4, 7, 4, 4, 2, 3, 7 }; try 
std.testing.expectEqual(@as(?u3, 1), firstIndexOfValue(base, 4)); @@ -462,7 +455,6 @@ pub fn prefixScan(comptime op: std.builtin.ReduceOp, comptime hop: isize, vec: a } test "vector prefix scan" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if ((builtin.cpu.arch == .armeb or builtin.cpu.arch == .thumbeb) and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/22060 if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21893 if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest; diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index b349903742..90626b0046 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -11194,6 +11194,7 @@ fn rvalueInner( const as_void = @as(u64, @intFromEnum(Zir.Inst.Ref.void_type)) << 32; const as_comptime_int = @as(u64, @intFromEnum(Zir.Inst.Ref.comptime_int_type)) << 32; const as_usize = @as(u64, @intFromEnum(Zir.Inst.Ref.usize_type)) << 32; + const as_u1 = @as(u64, @intFromEnum(Zir.Inst.Ref.u1_type)) << 32; const as_u8 = @as(u64, @intFromEnum(Zir.Inst.Ref.u8_type)) << 32; switch ((@as(u64, @intFromEnum(ty_inst)) << 32) | @as(u64, @intFromEnum(result))) { as_ty | @intFromEnum(Zir.Inst.Ref.u1_type), @@ -11237,10 +11238,11 @@ fn rvalueInner( as_ty | @intFromEnum(Zir.Inst.Ref.null_type), as_ty | @intFromEnum(Zir.Inst.Ref.undefined_type), as_ty | @intFromEnum(Zir.Inst.Ref.enum_literal_type), + as_ty | @intFromEnum(Zir.Inst.Ref.ptr_usize_type), + as_ty | @intFromEnum(Zir.Inst.Ref.ptr_const_comptime_int_type), as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_u8_type), as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_type), as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), - as_ty | @intFromEnum(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_type), as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_sentinel_0_type), as_ty | @intFromEnum(Zir.Inst.Ref.anyerror_void_error_union_type), @@ -11249,27 +11251,45 @@ fn rvalueInner( as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero), as_comptime_int | @intFromEnum(Zir.Inst.Ref.one), as_comptime_int | @intFromEnum(Zir.Inst.Ref.negative_one), + as_usize | @intFromEnum(Zir.Inst.Ref.undef_usize), as_usize | @intFromEnum(Zir.Inst.Ref.zero_usize), as_usize | @intFromEnum(Zir.Inst.Ref.one_usize), + as_u1 | @intFromEnum(Zir.Inst.Ref.undef_u1), + as_u1 | @intFromEnum(Zir.Inst.Ref.zero_u1), + as_u1 | @intFromEnum(Zir.Inst.Ref.one_u1), as_u8 | @intFromEnum(Zir.Inst.Ref.zero_u8), as_u8 | @intFromEnum(Zir.Inst.Ref.one_u8), as_u8 | @intFromEnum(Zir.Inst.Ref.four_u8), + as_bool | @intFromEnum(Zir.Inst.Ref.undef_bool), as_bool | @intFromEnum(Zir.Inst.Ref.bool_true), as_bool | @intFromEnum(Zir.Inst.Ref.bool_false), as_void | @intFromEnum(Zir.Inst.Ref.void_value), => return result, // type of result is already correct + as_bool | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_bool, + as_usize | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_usize, + as_usize | @intFromEnum(Zir.Inst.Ref.undef_u1) => return .undef_usize, + as_u1 | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_u1, + as_usize | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_usize, + as_u1 | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_u1, as_u8 | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_u8, as_usize | @intFromEnum(Zir.Inst.Ref.one) => return .one_usize, + as_u1 
| @intFromEnum(Zir.Inst.Ref.one) => return .one_u1, as_u8 | @intFromEnum(Zir.Inst.Ref.one) => return .one_u8, as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero, + as_u1 | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero_u1, as_u8 | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero_u8, as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one, + as_u1 | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one_u1, as_u8 | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one_u8, + as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_u1) => return .zero, as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_u8) => return .zero, + as_usize | @intFromEnum(Zir.Inst.Ref.zero_u1) => return .zero_usize, as_usize | @intFromEnum(Zir.Inst.Ref.zero_u8) => return .zero_usize, + as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_u1) => return .one, as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_u8) => return .one, + as_usize | @intFromEnum(Zir.Inst.Ref.one_u1) => return .one_usize, as_usize | @intFromEnum(Zir.Inst.Ref.one_u8) => return .one_usize, // Need an explicit type coercion instruction. diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig index 937b399a5d..440b4df9fc 100644 --- a/lib/std/zig/Zir.zig +++ b/lib/std/zig/Zir.zig @@ -2142,7 +2142,7 @@ pub const Inst = struct { ref_start_index = static_len, _, - pub const static_len = 118; + pub const static_len = 124; pub fn toRef(i: Index) Inst.Ref { return @enumFromInt(@intFromEnum(Index.ref_start_index) + @intFromEnum(i)); @@ -2220,10 +2220,11 @@ pub const Inst = struct { null_type, undefined_type, enum_literal_type, + ptr_usize_type, + ptr_const_comptime_int_type, manyptr_u8_type, manyptr_const_u8_type, manyptr_const_u8_sentinel_0_type, - single_const_pointer_to_comptime_int_type, slice_const_u8_type, slice_const_u8_sentinel_0_type, vector_8_i8_type, @@ -2279,11 +2280,16 @@ pub const Inst = struct { generic_poison_type, empty_tuple_type, undef, + undef_bool, + undef_usize, + undef_u1, zero, zero_usize, + zero_u1, zero_u8, one, one_usize, + one_u1, one_u8, four_u8, negative_one, diff --git a/src/Air.zig b/src/Air.zig index b468edbb47..b315acecce 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -50,8 +50,6 @@ pub const Inst = struct { /// is the same as both operands. /// The panic handler function must be populated before lowering AIR /// that contains this instruction. - /// This instruction will only be emitted if the backend has the - /// feature `safety_checked_instructions`. /// Uses the `bin_op` field. add_safe, /// Float addition. The instruction is allowed to have equal or more @@ -79,8 +77,6 @@ pub const Inst = struct { /// is the same as both operands. /// The panic handler function must be populated before lowering AIR /// that contains this instruction. - /// This instruction will only be emitted if the backend has the - /// feature `safety_checked_instructions`. /// Uses the `bin_op` field. sub_safe, /// Float subtraction. The instruction is allowed to have equal or more @@ -108,8 +104,6 @@ pub const Inst = struct { /// is the same as both operands. /// The panic handler function must be populated before lowering AIR /// that contains this instruction. - /// This instruction will only be emitted if the backend has the - /// feature `safety_checked_instructions`. /// Uses the `bin_op` field. mul_safe, /// Float multiplication. The instruction is allowed to have equal or more @@ -705,9 +699,21 @@ pub const Inst = struct { /// equal to the scalar value. /// Uses the `ty_op` field. 
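+        /// The operand is the scalar; `ty` is the result vector type.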
splat,
-        /// Constructs a vector by selecting elements from `a` and `b` based on `mask`.
-        /// Uses the `ty_pl` field with payload `Shuffle`.
-        shuffle,
+        /// Constructs a vector by selecting elements from a single vector based on a mask. Each
+        /// mask element is either an index into the vector, or a comptime-known value, or "undef".
+        /// Uses the `ty_pl` field, where the payload index points to:
+        /// 1. mask_elem: ShuffleOneMask // for each `mask_len`, which comes from `ty_pl.ty`
+        /// 2. operand: Ref // guaranteed not to be an interned value
+        /// See `unwrapShuffleOne`.
+        shuffle_one,
+        /// Constructs a vector by selecting elements from two vectors based on a mask. Each mask
+        /// element is either an index into one of the vectors, or "undef".
+        /// Uses the `ty_pl` field, where the payload index points to:
+        /// 1. mask_elem: ShuffleTwoMask // for each `mask_len`, which comes from `ty_pl.ty`
+        /// 2. operand_a: Ref // guaranteed not to be an interned value
+        /// 3. operand_b: Ref // guaranteed not to be an interned value
+        /// See `unwrapShuffleTwo`.
+        shuffle_two,
         /// Constructs a vector element-wise from `a` or `b` based on `pred`.
         /// Uses the `pl_op` field with `pred` as operand, and payload `Bin`.
         select,
@@ -1011,10 +1017,11 @@
         null_type = @intFromEnum(InternPool.Index.null_type),
         undefined_type = @intFromEnum(InternPool.Index.undefined_type),
         enum_literal_type = @intFromEnum(InternPool.Index.enum_literal_type),
+        ptr_usize_type = @intFromEnum(InternPool.Index.ptr_usize_type),
+        ptr_const_comptime_int_type = @intFromEnum(InternPool.Index.ptr_const_comptime_int_type),
         manyptr_u8_type = @intFromEnum(InternPool.Index.manyptr_u8_type),
         manyptr_const_u8_type = @intFromEnum(InternPool.Index.manyptr_const_u8_type),
         manyptr_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.manyptr_const_u8_sentinel_0_type),
-        single_const_pointer_to_comptime_int_type = @intFromEnum(InternPool.Index.single_const_pointer_to_comptime_int_type),
         slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
         slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
         vector_8_i8_type = @intFromEnum(InternPool.Index.vector_8_i8_type),
@@ -1070,11 +1077,16 @@
         generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type),
         empty_tuple_type = @intFromEnum(InternPool.Index.empty_tuple_type),
         undef = @intFromEnum(InternPool.Index.undef),
+        undef_bool = @intFromEnum(InternPool.Index.undef_bool),
+        undef_usize = @intFromEnum(InternPool.Index.undef_usize),
+        undef_u1 = @intFromEnum(InternPool.Index.undef_u1),
         zero = @intFromEnum(InternPool.Index.zero),
         zero_usize = @intFromEnum(InternPool.Index.zero_usize),
+        zero_u1 = @intFromEnum(InternPool.Index.zero_u1),
         zero_u8 = @intFromEnum(InternPool.Index.zero_u8),
         one = @intFromEnum(InternPool.Index.one),
         one_usize = @intFromEnum(InternPool.Index.one_usize),
+        one_u1 = @intFromEnum(InternPool.Index.one_u1),
         one_u8 = @intFromEnum(InternPool.Index.one_u8),
         four_u8 = @intFromEnum(InternPool.Index.four_u8),
         negative_one = @intFromEnum(InternPool.Index.negative_one),
@@ -1121,7 +1133,7 @@
     }
 
     pub fn toType(ref: Ref) Type {
-        return Type.fromInterned(ref.toInterned().?);
+        return .fromInterned(ref.toInterned().?);
     }
 };
@@ -1241,10 +1253,10 @@
     else_body_len: u32,
     branch_hints: BranchHints,
     pub const BranchHints = packed struct(u32) {
-        true: std.builtin.BranchHint,
-        false: std.builtin.BranchHint,
-        then_cov: 
CoveragePoint, - else_cov: CoveragePoint, + true: std.builtin.BranchHint = .none, + false: std.builtin.BranchHint = .none, + then_cov: CoveragePoint = .none, + else_cov: CoveragePoint = .none, _: u24 = 0, }; }; @@ -1299,13 +1311,6 @@ pub const FieldParentPtr = struct { field_index: u32, }; -pub const Shuffle = struct { - a: Inst.Ref, - b: Inst.Ref, - mask: InternPool.Index, - mask_len: u32, -}; - pub const VectorCmp = struct { lhs: Inst.Ref, rhs: Inst.Ref, @@ -1320,6 +1325,64 @@ pub const VectorCmp = struct { } }; +/// Used by `Inst.Tag.shuffle_one`. Represents a mask element which either indexes into a +/// runtime-known vector, or is a comptime-known value. +pub const ShuffleOneMask = packed struct(u32) { + index: u31, + kind: enum(u1) { elem, value }, + pub fn elem(idx: u32) ShuffleOneMask { + return .{ .index = @intCast(idx), .kind = .elem }; + } + pub fn value(val: Value) ShuffleOneMask { + return .{ .index = @intCast(@intFromEnum(val.toIntern())), .kind = .value }; + } + pub const Unwrapped = union(enum) { + /// The resulting element is this index into the runtime vector. + elem: u32, + /// The resulting element is this comptime-known value. + /// It is correctly typed. It might be `undefined`. + value: InternPool.Index, + }; + pub fn unwrap(raw: ShuffleOneMask) Unwrapped { + return switch (raw.kind) { + .elem => .{ .elem = raw.index }, + .value => .{ .value = @enumFromInt(raw.index) }, + }; + } +}; + +/// Used by `Inst.Tag.shuffle_two`. Represents a mask element which either indexes into one +/// of two runtime-known vectors, or is undefined. +pub const ShuffleTwoMask = enum(u32) { + undef = std.math.maxInt(u32), + _, + pub fn aElem(idx: u32) ShuffleTwoMask { + return @enumFromInt(idx << 1); + } + pub fn bElem(idx: u32) ShuffleTwoMask { + return @enumFromInt(idx << 1 | 1); + } + pub const Unwrapped = union(enum) { + /// The resulting element is this index into the first runtime vector. + a_elem: u32, + /// The resulting element is this index into the second runtime vector. + b_elem: u32, + /// The resulting element is `undefined`. + undef, + }; + pub fn unwrap(raw: ShuffleTwoMask) Unwrapped { + switch (raw) { + .undef => return .undef, + _ => {}, + } + const x = @intFromEnum(raw); + return switch (@as(u1, @truncate(x))) { + 0 => .{ .a_elem = x >> 1 }, + 1 => .{ .b_elem = x >> 1 }, + }; + } +}; + /// Trailing: /// 0. `Inst.Ref` for every outputs_len /// 1. 
`Inst.Ref` for every inputs_len @@ -1393,7 +1456,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { pub fn typeOf(air: *const Air, inst: Air.Inst.Ref, ip: *const InternPool) Type { if (inst.toInterned()) |ip_index| { - return Type.fromInterned(ip.typeOf(ip_index)); + return .fromInterned(ip.typeOf(ip_index)); } else { return air.typeOfIndex(inst.toIndex().?, ip); } @@ -1483,7 +1546,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .is_non_err_ptr, .is_named_enum_value, .error_set_has_value, - => return Type.bool, + => return .bool, .alloc, .ret_ptr, @@ -1503,7 +1566,6 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .cmpxchg_weak, .cmpxchg_strong, .slice, - .shuffle, .aggregate_init, .union_init, .field_parent_ptr, @@ -1517,6 +1579,8 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .ptr_sub, .try_ptr, .try_ptr_cold, + .shuffle_one, + .shuffle_two, => return datas[@intFromEnum(inst)].ty_pl.ty.toType(), .not, @@ -1574,7 +1638,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .ret_load, .unreach, .trap, - => return Type.noreturn, + => return .noreturn, .breakpoint, .dbg_stmt, @@ -1597,22 +1661,22 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .set_err_return_trace, .vector_store_elem, .c_va_end, - => return Type.void, + => return .void, .slice_len, .ret_addr, .frame_addr, .save_err_return_trace_index, - => return Type.usize, + => return .usize, - .wasm_memory_grow => return Type.isize, - .wasm_memory_size => return Type.usize, + .wasm_memory_grow => return .isize, + .wasm_memory_size => return .usize, - .tag_name, .error_name => return Type.slice_const_u8_sentinel_0, + .tag_name, .error_name => return .slice_const_u8_sentinel_0, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip); - return Type.fromInterned(ip.funcTypeReturnType(callee_ty.toIntern())); + return .fromInterned(ip.funcTypeReturnType(callee_ty.toIntern())); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { @@ -1630,7 +1694,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .reduce, .reduce_optimized => { const operand_ty = air.typeOf(datas[@intFromEnum(inst)].reduce.operand, ip); - return Type.fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child); + return .fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child); }, .mul_add => return air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip), @@ -1641,7 +1705,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .@"try", .try_cold => { const err_union_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip); - return Type.fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type); + return .fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type); }, .tlv_dllimport_ptr => return .fromInterned(datas[@intFromEnum(inst)].ty_nav.ty), @@ -1649,7 +1713,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .work_item_id, .work_group_size, .work_group_id, - => return Type.u32, + => return .u32, .inferred_alloc => unreachable, .inferred_alloc_comptime => unreachable, @@ -1696,7 +1760,7 @@ pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref { /// Returns `null` if runtime-known. 
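+/// Otherwise, a value is returned only when the instruction's type has exactly one possible value.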
pub fn value(air: Air, inst: Inst.Ref, pt: Zcu.PerThread) !?Value { if (inst.toInterned()) |ip_index| { - return Value.fromInterned(ip_index); + return .fromInterned(ip_index); } const index = inst.toIndex().?; return air.typeOfIndex(index, &pt.zcu.intern_pool).onePossibleValue(pt); @@ -1903,7 +1967,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { .reduce, .reduce_optimized, .splat, - .shuffle, + .shuffle_one, + .shuffle_two, .select, .is_named_enum_value, .tag_name, @@ -2030,6 +2095,48 @@ pub fn unwrapSwitch(air: *const Air, switch_inst: Inst.Index) UnwrappedSwitch { }; } +pub fn unwrapShuffleOne(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) struct { + result_ty: Type, + operand: Inst.Ref, + mask: []const ShuffleOneMask, +} { + const inst = air.instructions.get(@intFromEnum(inst_index)); + switch (inst.tag) { + .shuffle_one => {}, + else => unreachable, // assertion failure + } + const result_ty: Type = .fromInterned(inst.data.ty_pl.ty.toInterned().?); + const mask_len: u32 = result_ty.vectorLen(zcu); + const extra_idx = inst.data.ty_pl.payload; + return .{ + .result_ty = result_ty, + .operand = @enumFromInt(air.extra.items[extra_idx + mask_len]), + .mask = @ptrCast(air.extra.items[extra_idx..][0..mask_len]), + }; +} + +pub fn unwrapShuffleTwo(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) struct { + result_ty: Type, + operand_a: Inst.Ref, + operand_b: Inst.Ref, + mask: []const ShuffleTwoMask, +} { + const inst = air.instructions.get(@intFromEnum(inst_index)); + switch (inst.tag) { + .shuffle_two => {}, + else => unreachable, // assertion failure + } + const result_ty: Type = .fromInterned(inst.data.ty_pl.ty.toInterned().?); + const mask_len: u32 = result_ty.vectorLen(zcu); + const extra_idx = inst.data.ty_pl.payload; + return .{ + .result_ty = result_ty, + .operand_a = @enumFromInt(air.extra.items[extra_idx + mask_len + 0]), + .operand_b = @enumFromInt(air.extra.items[extra_idx + mask_len + 1]), + .mask = @ptrCast(air.extra.items[extra_idx..][0..mask_len]), + }; +} + pub const typesFullyResolved = types_resolved.typesFullyResolved; pub const typeFullyResolved = types_resolved.checkType; pub const valFullyResolved = types_resolved.checkVal; diff --git a/src/Air/Legalize.zig b/src/Air/Legalize.zig index 3d38798aa3..8f36ba21a7 100644 --- a/src/Air/Legalize.zig +++ b/src/Air/Legalize.zig @@ -1,147 +1,1737 @@ -zcu: *const Zcu, -air: Air, -features: std.enums.EnumSet(Feature), +pt: Zcu.PerThread, +air_instructions: std.MultiArrayList(Air.Inst), +air_extra: std.ArrayListUnmanaged(u32), +features: *const Features, pub const Feature = enum { + scalarize_add, + scalarize_add_safe, + scalarize_add_optimized, + scalarize_add_wrap, + scalarize_add_sat, + scalarize_sub, + scalarize_sub_safe, + scalarize_sub_optimized, + scalarize_sub_wrap, + scalarize_sub_sat, + scalarize_mul, + scalarize_mul_safe, + scalarize_mul_optimized, + scalarize_mul_wrap, + scalarize_mul_sat, + scalarize_div_float, + scalarize_div_float_optimized, + scalarize_div_trunc, + scalarize_div_trunc_optimized, + scalarize_div_floor, + scalarize_div_floor_optimized, + scalarize_div_exact, + scalarize_div_exact_optimized, + scalarize_rem, + scalarize_rem_optimized, + scalarize_mod, + scalarize_mod_optimized, + scalarize_max, + scalarize_min, + scalarize_add_with_overflow, + scalarize_sub_with_overflow, + scalarize_mul_with_overflow, + scalarize_shl_with_overflow, + scalarize_bit_and, + scalarize_bit_or, + scalarize_shr, + scalarize_shr_exact, + scalarize_shl, + 
scalarize_shl_exact, + scalarize_shl_sat, + scalarize_xor, + scalarize_not, + scalarize_bitcast, + scalarize_clz, + scalarize_ctz, + scalarize_popcount, + scalarize_byte_swap, + scalarize_bit_reverse, + scalarize_sqrt, + scalarize_sin, + scalarize_cos, + scalarize_tan, + scalarize_exp, + scalarize_exp2, + scalarize_log, + scalarize_log2, + scalarize_log10, + scalarize_abs, + scalarize_floor, + scalarize_ceil, + scalarize_round, + scalarize_trunc_float, + scalarize_neg, + scalarize_neg_optimized, + scalarize_cmp_vector, + scalarize_cmp_vector_optimized, + scalarize_fptrunc, + scalarize_fpext, + scalarize_intcast, + scalarize_intcast_safe, + scalarize_trunc, + scalarize_int_from_float, + scalarize_int_from_float_optimized, + scalarize_float_from_int, + scalarize_shuffle_one, + scalarize_shuffle_two, + scalarize_select, + scalarize_mul_add, + /// Legalize (shift lhs, (splat rhs)) -> (shift lhs, rhs) - remove_shift_vector_rhs_splat, + unsplat_shift_rhs, /// Legalize reduce of a one element vector to a bitcast reduce_one_elem_to_bitcast, + + /// Replace `intcast_safe` with an explicit safety check which `call`s the panic function on failure. + /// Not compatible with `scalarize_intcast_safe`. + expand_intcast_safe, + /// Replace `add_safe` with an explicit safety check which `call`s the panic function on failure. + /// Not compatible with `scalarize_add_safe`. + expand_add_safe, + /// Replace `sub_safe` with an explicit safety check which `call`s the panic function on failure. + /// Not compatible with `scalarize_sub_safe`. + expand_sub_safe, + /// Replace `mul_safe` with an explicit safety check which `call`s the panic function on failure. + /// Not compatible with `scalarize_mul_safe`. + expand_mul_safe, + + fn scalarize(tag: Air.Inst.Tag) Feature { + return switch (tag) { + else => unreachable, + .add => .scalarize_add, + .add_safe => .scalarize_add_safe, + .add_optimized => .scalarize_add_optimized, + .add_wrap => .scalarize_add_wrap, + .add_sat => .scalarize_add_sat, + .sub => .scalarize_sub, + .sub_safe => .scalarize_sub_safe, + .sub_optimized => .scalarize_sub_optimized, + .sub_wrap => .scalarize_sub_wrap, + .sub_sat => .scalarize_sub_sat, + .mul => .scalarize_mul, + .mul_safe => .scalarize_mul_safe, + .mul_optimized => .scalarize_mul_optimized, + .mul_wrap => .scalarize_mul_wrap, + .mul_sat => .scalarize_mul_sat, + .div_float => .scalarize_div_float, + .div_float_optimized => .scalarize_div_float_optimized, + .div_trunc => .scalarize_div_trunc, + .div_trunc_optimized => .scalarize_div_trunc_optimized, + .div_floor => .scalarize_div_floor, + .div_floor_optimized => .scalarize_div_floor_optimized, + .div_exact => .scalarize_div_exact, + .div_exact_optimized => .scalarize_div_exact_optimized, + .rem => .scalarize_rem, + .rem_optimized => .scalarize_rem_optimized, + .mod => .scalarize_mod, + .mod_optimized => .scalarize_mod_optimized, + .max => .scalarize_max, + .min => .scalarize_min, + .add_with_overflow => .scalarize_add_with_overflow, + .sub_with_overflow => .scalarize_sub_with_overflow, + .mul_with_overflow => .scalarize_mul_with_overflow, + .shl_with_overflow => .scalarize_shl_with_overflow, + .bit_and => .scalarize_bit_and, + .bit_or => .scalarize_bit_or, + .shr => .scalarize_shr, + .shr_exact => .scalarize_shr_exact, + .shl => .scalarize_shl, + .shl_exact => .scalarize_shl_exact, + .shl_sat => .scalarize_shl_sat, + .xor => .scalarize_xor, + .not => .scalarize_not, + .bitcast => .scalarize_bitcast, + .clz => .scalarize_clz, + .ctz => .scalarize_ctz, + .popcount => 
.scalarize_popcount,
+            .byte_swap => .scalarize_byte_swap,
+            .bit_reverse => .scalarize_bit_reverse,
+            .sqrt => .scalarize_sqrt,
+            .sin => .scalarize_sin,
+            .cos => .scalarize_cos,
+            .tan => .scalarize_tan,
+            .exp => .scalarize_exp,
+            .exp2 => .scalarize_exp2,
+            .log => .scalarize_log,
+            .log2 => .scalarize_log2,
+            .log10 => .scalarize_log10,
+            .abs => .scalarize_abs,
+            .floor => .scalarize_floor,
+            .ceil => .scalarize_ceil,
+            .round => .scalarize_round,
+            .trunc_float => .scalarize_trunc_float,
+            .neg => .scalarize_neg,
+            .neg_optimized => .scalarize_neg_optimized,
+            .cmp_vector => .scalarize_cmp_vector,
+            .cmp_vector_optimized => .scalarize_cmp_vector_optimized,
+            .fptrunc => .scalarize_fptrunc,
+            .fpext => .scalarize_fpext,
+            .intcast => .scalarize_intcast,
+            .intcast_safe => .scalarize_intcast_safe,
+            .trunc => .scalarize_trunc,
+            .int_from_float => .scalarize_int_from_float,
+            .int_from_float_optimized => .scalarize_int_from_float_optimized,
+            .float_from_int => .scalarize_float_from_int,
+            .shuffle_one => .scalarize_shuffle_one,
+            .shuffle_two => .scalarize_shuffle_two,
+            .select => .scalarize_select,
+            .mul_add => .scalarize_mul_add,
+        };
+    }
 };
 
-pub const Features = std.enums.EnumFieldStruct(Feature, bool, false);
+pub const Features = std.enums.EnumSet(Feature);
 
-pub fn legalize(air: *Air, backend: std.builtin.CompilerBackend, zcu: *const Zcu) std.mem.Allocator.Error!void {
+pub const Error = std.mem.Allocator.Error;
+
+pub fn legalize(air: *Air, pt: Zcu.PerThread, features: *const Features) Error!void {
+    dev.check(.legalize);
+    assert(!features.bits.eql(.initEmpty())); // backend asked to run legalize, but no features were enabled
     var l: Legalize = .{
-        .zcu = zcu,
-        .air = air.*,
-        .features = features: switch (backend) {
-            .other, .stage1 => unreachable,
-            inline .stage2_llvm,
-            .stage2_c,
-            .stage2_wasm,
-            .stage2_arm,
-            .stage2_x86_64,
-            .stage2_aarch64,
-            .stage2_x86,
-            .stage2_riscv64,
-            .stage2_sparc64,
-            .stage2_spirv64,
-            .stage2_powerpc,
-            => |ct_backend| {
-                const Backend = codegen.importBackend(ct_backend) orelse break :features .initEmpty();
-                break :features if (@hasDecl(Backend, "legalize_features"))
-                    .init(Backend.legalize_features)
-                else
-                    .initEmpty();
-            },
-            _ => unreachable,
-        },
+        .pt = pt,
+        .air_instructions = air.instructions.toMultiArrayList(),
+        .air_extra = air.extra,
+        .features = features,
     };
-    defer air.* = l.air;
-    if (!l.features.bits.eql(.initEmpty())) try l.legalizeBody(l.air.getMainBody());
+    defer air.* = l.getTmpAir();
+    const main_extra = l.extraData(Air.Block, l.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)]);
+    try l.legalizeBody(main_extra.end, main_extra.data.body_len);
 }
 
-fn legalizeBody(l: *Legalize, body: []const Air.Inst.Index) std.mem.Allocator.Error!void {
-    const zcu = l.zcu;
+fn getTmpAir(l: *const Legalize) Air {
+    return .{
+        .instructions = l.air_instructions.slice(),
+        .extra = l.air_extra,
+    };
+}
+
+fn typeOf(l: *const Legalize, ref: Air.Inst.Ref) Type {
+    return l.getTmpAir().typeOf(ref, &l.pt.zcu.intern_pool);
+}
+
+fn typeOfIndex(l: *const Legalize, inst: Air.Inst.Index) Type {
+    return l.getTmpAir().typeOfIndex(inst, &l.pt.zcu.intern_pool);
+}
+
+fn extraData(l: *const Legalize, comptime T: type, index: usize) @TypeOf(Air.extraData(undefined, T, undefined)) {
+    return l.getTmpAir().extraData(T, index);
+}
+
+fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
+    const zcu = l.pt.zcu;
     const ip = &zcu.intern_pool;
-    const tags = l.air.instructions.items(.tag);
-    const data = 
l.air.instructions.items(.data); - for (body) |inst| inst: switch (tags[@intFromEnum(inst)]) { - else => {}, - - .shl, - .shl_exact, - .shl_sat, - .shr, - .shr_exact, - => |air_tag| if (l.features.contains(.remove_shift_vector_rhs_splat)) done: { - const bin_op = data[@intFromEnum(inst)].bin_op; - const ty = l.air.typeOf(bin_op.rhs, ip); - if (!ty.isVector(zcu)) break :done; - if (bin_op.rhs.toInterned()) |rhs_ip_index| switch (ip.indexToKey(rhs_ip_index)) { - else => {}, - .aggregate => |aggregate| switch (aggregate.storage) { - else => {}, - .repeated_elem => |splat| continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{ - .lhs = bin_op.lhs, - .rhs = Air.internedToRef(splat), - } }), - }, - } else { - const rhs_inst = bin_op.rhs.toIndex().?; - switch (tags[@intFromEnum(rhs_inst)]) { - else => {}, - .splat => continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{ - .lhs = bin_op.lhs, - .rhs = data[@intFromEnum(rhs_inst)].ty_op.operand, - } }), + for (0..body_len) |body_index| { + const inst: Air.Inst.Index = @enumFromInt(l.air_extra.items[body_start + body_index]); + inst: switch (l.air_instructions.items(.tag)[@intFromEnum(inst)]) { + .arg, + => {}, + inline .add, + .add_optimized, + .add_wrap, + .add_sat, + .sub, + .sub_optimized, + .sub_wrap, + .sub_sat, + .mul, + .mul_optimized, + .mul_wrap, + .mul_sat, + .div_float, + .div_float_optimized, + .div_trunc, + .div_trunc_optimized, + .div_floor, + .div_floor_optimized, + .div_exact, + .div_exact_optimized, + .rem, + .rem_optimized, + .mod, + .mod_optimized, + .max, + .min, + .bit_and, + .bit_or, + .xor, + => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) { + const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op; + if (l.typeOf(bin_op.lhs).isVector(zcu)) continue :inst try l.scalarize(inst, .bin_op); + }, + .add_safe => if (l.features.contains(.expand_add_safe)) { + assert(!l.features.contains(.scalarize_add_safe)); // it doesn't make sense to do both + continue :inst l.replaceInst(inst, .block, try l.safeArithmeticBlockPayload(inst, .add_with_overflow)); + } else if (l.features.contains(.scalarize_add_safe)) { + const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op; + if (l.typeOf(bin_op.lhs).isVector(zcu)) continue :inst try l.scalarize(inst, .bin_op); + }, + .sub_safe => if (l.features.contains(.expand_sub_safe)) { + assert(!l.features.contains(.scalarize_sub_safe)); // it doesn't make sense to do both + continue :inst l.replaceInst(inst, .block, try l.safeArithmeticBlockPayload(inst, .sub_with_overflow)); + } else if (l.features.contains(.scalarize_sub_safe)) { + const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op; + if (l.typeOf(bin_op.lhs).isVector(zcu)) continue :inst try l.scalarize(inst, .bin_op); + }, + .mul_safe => if (l.features.contains(.expand_mul_safe)) { + assert(!l.features.contains(.scalarize_mul_safe)); // it doesn't make sense to do both + continue :inst l.replaceInst(inst, .block, try l.safeArithmeticBlockPayload(inst, .mul_with_overflow)); + } else if (l.features.contains(.scalarize_mul_safe)) { + const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op; + if (l.typeOf(bin_op.lhs).isVector(zcu)) continue :inst try l.scalarize(inst, .bin_op); + }, + .ptr_add, + .ptr_sub, + => {}, + inline .add_with_overflow, + .sub_with_overflow, + .mul_with_overflow, + .shl_with_overflow, + => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) { + const ty_pl = 
l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl; + if (ty_pl.ty.toType().fieldType(0, zcu).isVector(zcu)) continue :inst l.replaceInst(inst, .block, try l.scalarizeOverflowBlockPayload(inst)); + }, + .alloc, + => {}, + .inferred_alloc, + .inferred_alloc_comptime, + => unreachable, + .ret_ptr, + .assembly, + => {}, + inline .shr, + .shr_exact, + .shl, + .shl_exact, + .shl_sat, + => |air_tag| done: { + const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op; + if (!l.typeOf(bin_op.rhs).isVector(zcu)) break :done; + if (l.features.contains(.unsplat_shift_rhs)) { + if (bin_op.rhs.toInterned()) |rhs_ip_index| switch (ip.indexToKey(rhs_ip_index)) { + else => {}, + .aggregate => |aggregate| switch (aggregate.storage) { + else => {}, + .repeated_elem => |splat| continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{ + .lhs = bin_op.lhs, + .rhs = Air.internedToRef(splat), + } }), + }, + } else { + const rhs_inst = bin_op.rhs.toIndex().?; + switch (l.air_instructions.items(.tag)[@intFromEnum(rhs_inst)]) { + else => {}, + .splat => continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{ + .lhs = bin_op.lhs, + .rhs = l.air_instructions.items(.data)[@intFromEnum(rhs_inst)].ty_op.operand, + } }), + } + } } - } - }, - - .reduce, - .reduce_optimized, - => if (l.features.contains(.reduce_one_elem_to_bitcast)) done: { - const reduce = data[@intFromEnum(inst)].reduce; - const vector_ty = l.air.typeOf(reduce.operand, ip); - switch (vector_ty.vectorLen(zcu)) { - 0 => unreachable, - 1 => continue :inst l.replaceInst(inst, .bitcast, .{ .ty_op = .{ - .ty = Air.internedToRef(vector_ty.scalarType(zcu).toIntern()), - .operand = reduce.operand, - } }), - else => break :done, - } - }, - - .@"try", .try_cold => { - const pl_op = data[@intFromEnum(inst)].pl_op; - const extra = l.air.extraData(Air.Try, pl_op.payload); - try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.body_len])); - }, - .try_ptr, .try_ptr_cold => { - const ty_pl = data[@intFromEnum(inst)].ty_pl; - const extra = l.air.extraData(Air.TryPtr, ty_pl.payload); - try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.body_len])); - }, - .block, .loop => { - const ty_pl = data[@intFromEnum(inst)].ty_pl; - const extra = l.air.extraData(Air.Block, ty_pl.payload); - try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.body_len])); - }, - .dbg_inline_block => { - const ty_pl = data[@intFromEnum(inst)].ty_pl; - const extra = l.air.extraData(Air.DbgInlineBlock, ty_pl.payload); - try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.body_len])); - }, - .cond_br => { - const pl_op = data[@intFromEnum(inst)].pl_op; - const extra = l.air.extraData(Air.CondBr, pl_op.payload); - try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.then_body_len])); - try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len])); - }, - .switch_br, .loop_switch_br => { - const switch_br = l.air.unwrapSwitch(inst); - var it = switch_br.iterateCases(); - while (it.next()) |case| try l.legalizeBody(case.body); - try l.legalizeBody(it.elseBody()); - }, - }; + if (l.features.contains(comptime .scalarize(air_tag))) continue :inst try l.scalarize(inst, .bin_op); + }, + inline .not, + .clz, + .ctz, + .popcount, + .byte_swap, + .bit_reverse, + .abs, + .fptrunc, + .fpext, + .intcast, + .trunc, + .int_from_float, + .int_from_float_optimized, + .float_from_int, + => |air_tag| if (l.features.contains(comptime 
.scalarize(air_tag))) { + const ty_op = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_op; + if (ty_op.ty.toType().isVector(zcu)) continue :inst try l.scalarize(inst, .ty_op); + }, + inline .bitcast, + => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) { + const ty_op = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_op; + const to_ty = ty_op.ty.toType(); + const from_ty = l.typeOf(ty_op.operand); + if (to_ty.isVector(zcu) and from_ty.isVector(zcu) and to_ty.vectorLen(zcu) == from_ty.vectorLen(zcu)) + continue :inst try l.scalarize(inst, .ty_op); + }, + .intcast_safe => if (l.features.contains(.expand_intcast_safe)) { + assert(!l.features.contains(.scalarize_intcast_safe)); // it doesn't make sense to do both + continue :inst l.replaceInst(inst, .block, try l.safeIntcastBlockPayload(inst)); + } else if (l.features.contains(.scalarize_intcast_safe)) { + const ty_op = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_op; + if (ty_op.ty.toType().isVector(zcu)) continue :inst try l.scalarize(inst, .ty_op); + }, + .block, + .loop, + => { + const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = l.extraData(Air.Block, ty_pl.payload); + try l.legalizeBody(extra.end, extra.data.body_len); + }, + .repeat, + .br, + .trap, + .breakpoint, + .ret_addr, + .frame_addr, + .call, + .call_always_tail, + .call_never_tail, + .call_never_inline, + => {}, + inline .sqrt, + .sin, + .cos, + .tan, + .exp, + .exp2, + .log, + .log2, + .log10, + .floor, + .ceil, + .round, + .trunc_float, + .neg, + .neg_optimized, + => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) { + const un_op = l.air_instructions.items(.data)[@intFromEnum(inst)].un_op; + if (l.typeOf(un_op).isVector(zcu)) continue :inst try l.scalarize(inst, .un_op); + }, + .cmp_lt, + .cmp_lt_optimized, + .cmp_lte, + .cmp_lte_optimized, + .cmp_eq, + .cmp_eq_optimized, + .cmp_gte, + .cmp_gte_optimized, + .cmp_gt, + .cmp_gt_optimized, + .cmp_neq, + .cmp_neq_optimized, + => {}, + inline .cmp_vector, + .cmp_vector_optimized, + => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) { + const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl; + if (ty_pl.ty.toType().isVector(zcu)) continue :inst try l.scalarize(inst, .ty_pl_vector_cmp); + }, + .cond_br, + => { + const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = l.extraData(Air.CondBr, pl_op.payload); + try l.legalizeBody(extra.end, extra.data.then_body_len); + try l.legalizeBody(extra.end + extra.data.then_body_len, extra.data.else_body_len); + }, + .switch_br, + .loop_switch_br, + => { + const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = l.extraData(Air.SwitchBr, pl_op.payload); + const hint_bag_count = std.math.divCeil(usize, extra.data.cases_len + 1, 10) catch unreachable; + var extra_index = extra.end + hint_bag_count; + for (0..extra.data.cases_len) |_| { + const case_extra = l.extraData(Air.SwitchBr.Case, extra_index); + const case_body_start = case_extra.end + case_extra.data.items_len + case_extra.data.ranges_len * 2; + try l.legalizeBody(case_body_start, case_extra.data.body_len); + extra_index = case_body_start + case_extra.data.body_len; + } + try l.legalizeBody(extra_index, extra.data.else_body_len); + }, + .switch_dispatch, + => {}, + .@"try", + .try_cold, + => { + const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op; + const extra = l.extraData(Air.Try, pl_op.payload); + try 
l.legalizeBody(extra.end, extra.data.body_len); + }, + .try_ptr, + .try_ptr_cold, + => { + const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = l.extraData(Air.TryPtr, ty_pl.payload); + try l.legalizeBody(extra.end, extra.data.body_len); + }, + .dbg_stmt, + .dbg_empty_stmt, + => {}, + .dbg_inline_block, + => { + const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl; + const extra = l.extraData(Air.DbgInlineBlock, ty_pl.payload); + try l.legalizeBody(extra.end, extra.data.body_len); + }, + .dbg_var_ptr, + .dbg_var_val, + .dbg_arg_inline, + .is_null, + .is_non_null, + .is_null_ptr, + .is_non_null_ptr, + .is_err, + .is_non_err, + .is_err_ptr, + .is_non_err_ptr, + .bool_and, + .bool_or, + .load, + .ret, + .ret_safe, + .ret_load, + .store, + .store_safe, + .unreach, + => {}, + .optional_payload, + .optional_payload_ptr, + .optional_payload_ptr_set, + .wrap_optional, + .unwrap_errunion_payload, + .unwrap_errunion_err, + .unwrap_errunion_payload_ptr, + .unwrap_errunion_err_ptr, + .errunion_payload_ptr_set, + .wrap_errunion_payload, + .wrap_errunion_err, + .struct_field_ptr, + .struct_field_ptr_index_0, + .struct_field_ptr_index_1, + .struct_field_ptr_index_2, + .struct_field_ptr_index_3, + .struct_field_val, + .set_union_tag, + .get_union_tag, + .slice, + .slice_len, + .slice_ptr, + .ptr_slice_len_ptr, + .ptr_slice_ptr_ptr, + .array_elem_val, + .slice_elem_val, + .slice_elem_ptr, + .ptr_elem_val, + .ptr_elem_ptr, + .array_to_slice, + => {}, + .reduce, + .reduce_optimized, + => if (l.features.contains(.reduce_one_elem_to_bitcast)) done: { + const reduce = l.air_instructions.items(.data)[@intFromEnum(inst)].reduce; + const vector_ty = l.typeOf(reduce.operand); + switch (vector_ty.vectorLen(zcu)) { + 0 => unreachable, + 1 => continue :inst l.replaceInst(inst, .bitcast, .{ .ty_op = .{ + .ty = Air.internedToRef(vector_ty.childType(zcu).toIntern()), + .operand = reduce.operand, + } }), + else => break :done, + } + }, + .splat, + => {}, + .shuffle_one => if (l.features.contains(.scalarize_shuffle_one)) continue :inst try l.scalarize(inst, .shuffle_one), + .shuffle_two => if (l.features.contains(.scalarize_shuffle_two)) continue :inst try l.scalarize(inst, .shuffle_two), + .select => if (l.features.contains(.scalarize_select)) continue :inst try l.scalarize(inst, .select), + .memset, + .memset_safe, + .memcpy, + .memmove, + .cmpxchg_weak, + .cmpxchg_strong, + .atomic_load, + .atomic_store_unordered, + .atomic_store_monotonic, + .atomic_store_release, + .atomic_store_seq_cst, + .atomic_rmw, + .is_named_enum_value, + .tag_name, + .error_name, + .error_set_has_value, + .aggregate_init, + .union_init, + .prefetch, + => {}, + inline .mul_add, + => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) { + const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op; + if (l.typeOf(pl_op.operand).isVector(zcu)) continue :inst try l.scalarize(inst, .pl_op_bin); + }, + .field_parent_ptr, + .wasm_memory_size, + .wasm_memory_grow, + .cmp_lt_errors_len, + .err_return_trace, + .set_err_return_trace, + .addrspace_cast, + .save_err_return_trace_index, + .vector_store_elem, + .tlv_dllimport_ptr, + .c_va_arg, + .c_va_copy, + .c_va_end, + .c_va_start, + .work_item_id, + .work_group_size, + .work_group_id, + => {}, + } + } } -// inline to propagate comptime `tag`s -inline fn replaceInst(l: *Legalize, inst: Air.Inst.Index, tag: Air.Inst.Tag, data: Air.Inst.Data) Air.Inst.Tag { - const ip = &l.zcu.intern_pool; - const orig_ty = if 
(std.debug.runtime_safety) l.air.typeOfIndex(inst, ip) else {}; - l.air.instructions.items(.tag)[@intFromEnum(inst)] = tag; - l.air.instructions.items(.data)[@intFromEnum(inst)] = data; - if (std.debug.runtime_safety) std.debug.assert(l.air.typeOfIndex(inst, ip).toIntern() == orig_ty.toIntern()); +const ScalarizeForm = enum { un_op, ty_op, bin_op, ty_pl_vector_cmp, pl_op_bin, shuffle_one, shuffle_two, select }; +inline fn scalarize(l: *Legalize, orig_inst: Air.Inst.Index, comptime form: ScalarizeForm) Error!Air.Inst.Tag { + return l.replaceInst(orig_inst, .block, try l.scalarizeBlockPayload(orig_inst, form)); +} +fn scalarizeBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, comptime form: ScalarizeForm) Error!Air.Inst.Data { + const pt = l.pt; + const zcu = pt.zcu; + + const orig = l.air_instructions.get(@intFromEnum(orig_inst)); + const res_ty = l.typeOfIndex(orig_inst); + const res_len = res_ty.vectorLen(zcu); + + const extra_insts = switch (form) { + .un_op, .ty_op => 1, + .bin_op, .ty_pl_vector_cmp => 2, + .pl_op_bin => 3, + .shuffle_one, .shuffle_two => 13, + .select => 6, + }; + var inst_buf: [5 + extra_insts + 9]Air.Inst.Index = undefined; + try l.air_instructions.ensureUnusedCapacity(zcu.gpa, inst_buf.len); + + var res_block: Block = .init(&inst_buf); + { + const res_alloc_inst = res_block.add(l, .{ + .tag = .alloc, + .data = .{ .ty = try pt.singleMutPtrType(res_ty) }, + }); + const index_alloc_inst = res_block.add(l, .{ + .tag = .alloc, + .data = .{ .ty = .ptr_usize }, + }); + _ = res_block.add(l, .{ + .tag = .store, + .data = .{ .bin_op = .{ + .lhs = index_alloc_inst.toRef(), + .rhs = .zero_usize, + } }, + }); + + var loop: Loop = .init(l, &res_block); + loop.block = .init(res_block.stealRemainingCapacity()); + { + const cur_index_inst = loop.block.add(l, .{ + .tag = .load, + .data = .{ .ty_op = .{ + .ty = .usize_type, + .operand = index_alloc_inst.toRef(), + } }, + }); + _ = loop.block.add(l, .{ + .tag = .vector_store_elem, + .data = .{ .vector_store_elem = .{ + .vector_ptr = res_alloc_inst.toRef(), + .payload = try l.addExtra(Air.Bin, .{ + .lhs = cur_index_inst.toRef(), + .rhs = res_elem: switch (form) { + .un_op => loop.block.add(l, .{ + .tag = orig.tag, + .data = .{ .un_op = loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = orig.data.un_op, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef() }, + }).toRef(), + .ty_op => loop.block.add(l, .{ + .tag = orig.tag, + .data = .{ .ty_op = .{ + .ty = Air.internedToRef(res_ty.childType(zcu).toIntern()), + .operand = loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = orig.data.ty_op.operand, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + } }, + }).toRef(), + .bin_op => loop.block.add(l, .{ + .tag = orig.tag, + .data = .{ .bin_op = .{ + .lhs = loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = orig.data.bin_op.lhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + .rhs = loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = orig.data.bin_op.rhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + } }, + }).toRef(), + .ty_pl_vector_cmp => { + const extra = l.extraData(Air.VectorCmp, orig.data.ty_pl.payload).data; + break :res_elem (try loop.block.addCmp( + l, + extra.compareOperator(), + loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = extra.lhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ 
.bin_op = .{ + .lhs = extra.rhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + .{ .optimized = switch (orig.tag) { + else => unreachable, + .cmp_vector => false, + .cmp_vector_optimized => true, + } }, + )).toRef(); + }, + .pl_op_bin => { + const extra = l.extraData(Air.Bin, orig.data.pl_op.payload).data; + break :res_elem loop.block.add(l, .{ + .tag = orig.tag, + .data = .{ .pl_op = .{ + .payload = try l.addExtra(Air.Bin, .{ + .lhs = loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = extra.lhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + .rhs = loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = extra.rhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + }), + .operand = loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = orig.data.pl_op.operand, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + } }, + }).toRef(); + }, + .shuffle_one, .shuffle_two => { + const ip = &zcu.intern_pool; + const unwrapped = switch (form) { + else => comptime unreachable, + .shuffle_one => l.getTmpAir().unwrapShuffleOne(zcu, orig_inst), + .shuffle_two => l.getTmpAir().unwrapShuffleTwo(zcu, orig_inst), + }; + const operand_a = switch (form) { + else => comptime unreachable, + .shuffle_one => unwrapped.operand, + .shuffle_two => unwrapped.operand_a, + }; + const operand_a_len = l.typeOf(operand_a).vectorLen(zcu); + const elem_ty = res_ty.childType(zcu); + var res_elem: Result = .init(l, elem_ty, &loop.block); + res_elem.block = .init(loop.block.stealCapacity(extra_insts)); + { + const ExpectedContents = extern struct { + mask_elems: [128]InternPool.Index, + ct_elems: switch (form) { + else => unreachable, + .shuffle_one => extern struct { + keys: [152]InternPool.Index, + header: u8 align(@alignOf(u32)), + index: [256][2]u8, + }, + .shuffle_two => void, + }, + }; + var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = + std.heap.stackFallback(@sizeOf(ExpectedContents), zcu.gpa); + const gpa = stack.get(); + + const mask_elems = try gpa.alloc(InternPool.Index, res_len); + defer gpa.free(mask_elems); + + var ct_elems: switch (form) { + else => unreachable, + .shuffle_one => std.AutoArrayHashMapUnmanaged(InternPool.Index, void), + .shuffle_two => struct { + const empty: @This() = .{}; + inline fn deinit(_: @This(), _: std.mem.Allocator) void {} + inline fn ensureTotalCapacity(_: @This(), _: std.mem.Allocator, _: usize) error{}!void {} + }, + } = .empty; + defer ct_elems.deinit(gpa); + try ct_elems.ensureTotalCapacity(gpa, res_len); + + const mask_elem_ty = try pt.intType(.signed, 1 + Type.smallestUnsignedBits(@max(operand_a_len, switch (form) { + else => comptime unreachable, + .shuffle_one => res_len, + .shuffle_two => l.typeOf(unwrapped.operand_b).vectorLen(zcu), + }))); + for (mask_elems, unwrapped.mask) |*mask_elem_val, mask_elem| mask_elem_val.* = (try pt.intValue(mask_elem_ty, switch (form) { + else => comptime unreachable, + .shuffle_one => switch (mask_elem.unwrap()) { + .elem => |index| index, + .value => |elem_val| if (ip.isUndef(elem_val)) + operand_a_len + else + ~@as(i33, @intCast((ct_elems.getOrPutAssumeCapacity(elem_val)).index)), + }, + .shuffle_two => switch (mask_elem.unwrap()) { + .a_elem => |a_index| a_index, + .b_elem => |b_index| ~@as(i33, b_index), + .undef => operand_a_len, + }, + })).toIntern(); + const mask_ty = try pt.arrayType(.{ + .len = res_len, + .child = mask_elem_ty.toIntern(), + }); + const mask_elem_inst = 
res_elem.block.add(l, .{ + .tag = .ptr_elem_val, + .data = .{ .bin_op = .{ + .lhs = Air.internedToRef(try pt.intern(.{ .ptr = .{ + .ty = (try pt.manyConstPtrType(mask_elem_ty)).toIntern(), + .base_addr = .{ .uav = .{ + .val = try pt.intern(.{ .aggregate = .{ + .ty = mask_ty.toIntern(), + .storage = .{ .elems = mask_elems }, + } }), + .orig_ty = (try pt.singleConstPtrType(mask_ty)).toIntern(), + } }, + .byte_offset = 0, + } })), + .rhs = cur_index_inst.toRef(), + } }, + }); + var def_cond_br: CondBr = .init(l, (try res_elem.block.addCmp( + l, + .lt, + mask_elem_inst.toRef(), + try pt.intRef(mask_elem_ty, operand_a_len), + .{}, + )).toRef(), &res_elem.block, .{}); + def_cond_br.then_block = .init(res_elem.block.stealRemainingCapacity()); + { + const operand_b_used = switch (form) { + else => comptime unreachable, + .shuffle_one => ct_elems.count() > 0, + .shuffle_two => true, + }; + var operand_cond_br: CondBr = undefined; + operand_cond_br.then_block = if (operand_b_used) then_block: { + operand_cond_br = .init(l, (try def_cond_br.then_block.addCmp( + l, + .gte, + mask_elem_inst.toRef(), + try pt.intRef(mask_elem_ty, 0), + .{}, + )).toRef(), &def_cond_br.then_block, .{}); + break :then_block .init(def_cond_br.then_block.stealRemainingCapacity()); + } else def_cond_br.then_block; + _ = operand_cond_br.then_block.add(l, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = res_elem.inst, + .operand = operand_cond_br.then_block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = operand_a, + .rhs = operand_cond_br.then_block.add(l, .{ + .tag = .intcast, + .data = .{ .ty_op = .{ + .ty = .usize_type, + .operand = mask_elem_inst.toRef(), + } }, + }).toRef(), + } }, + }).toRef(), + } }, + }); + if (operand_b_used) { + operand_cond_br.else_block = .init(operand_cond_br.then_block.stealRemainingCapacity()); + _ = operand_cond_br.else_block.add(l, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = res_elem.inst, + .operand = if (switch (form) { + else => comptime unreachable, + .shuffle_one => ct_elems.count() > 1, + .shuffle_two => true, + }) operand_cond_br.else_block.add(l, .{ + .tag = switch (form) { + else => comptime unreachable, + .shuffle_one => .ptr_elem_val, + .shuffle_two => .array_elem_val, + }, + .data = .{ .bin_op = .{ + .lhs = operand_b: switch (form) { + else => comptime unreachable, + .shuffle_one => { + const ct_elems_ty = try pt.arrayType(.{ + .len = ct_elems.count(), + .child = elem_ty.toIntern(), + }); + break :operand_b Air.internedToRef(try pt.intern(.{ .ptr = .{ + .ty = (try pt.manyConstPtrType(elem_ty)).toIntern(), + .base_addr = .{ .uav = .{ + .val = try pt.intern(.{ .aggregate = .{ + .ty = ct_elems_ty.toIntern(), + .storage = .{ .elems = ct_elems.keys() }, + } }), + .orig_ty = (try pt.singleConstPtrType(ct_elems_ty)).toIntern(), + } }, + .byte_offset = 0, + } })); + }, + .shuffle_two => unwrapped.operand_b, + }, + .rhs = operand_cond_br.else_block.add(l, .{ + .tag = .intcast, + .data = .{ .ty_op = .{ + .ty = .usize_type, + .operand = operand_cond_br.else_block.add(l, .{ + .tag = .not, + .data = .{ .ty_op = .{ + .ty = Air.internedToRef(mask_elem_ty.toIntern()), + .operand = mask_elem_inst.toRef(), + } }, + }).toRef(), + } }, + }).toRef(), + } }, + }).toRef() else res_elem_br: { + _ = operand_cond_br.else_block.stealCapacity(3); + break :res_elem_br Air.internedToRef(ct_elems.keys()[0]); + }, + } }, + }); + def_cond_br.else_block = .init(operand_cond_br.else_block.stealRemainingCapacity()); + try operand_cond_br.finish(l); + } else { + 
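// `operand_b` is never consulted on this path: a `shuffle_one` whose mask selects
+ // no comptime-known elements. No inner cond_br is emitted here, so the
+ // instruction slots reserved for that branch are handed back below (via
+ // `stealCapacity`) to keep the buffer's capacity accounting consistent.
+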
def_cond_br.then_block = operand_cond_br.then_block; + _ = def_cond_br.then_block.stealCapacity(6); + def_cond_br.else_block = .init(def_cond_br.then_block.stealRemainingCapacity()); + } + } + _ = def_cond_br.else_block.add(l, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = res_elem.inst, + .operand = try pt.undefRef(elem_ty), + } }, + }); + try def_cond_br.finish(l); + } + try res_elem.finish(l); + break :res_elem res_elem.inst.toRef(); + }, + .select => { + const extra = l.extraData(Air.Bin, orig.data.pl_op.payload).data; + var res_elem: Result = .init(l, l.typeOf(extra.lhs).childType(zcu), &loop.block); + res_elem.block = .init(loop.block.stealCapacity(extra_insts)); + { + var select_cond_br: CondBr = .init(l, res_elem.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = orig.data.pl_op.operand, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), &res_elem.block, .{}); + select_cond_br.then_block = .init(res_elem.block.stealRemainingCapacity()); + _ = select_cond_br.then_block.add(l, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = res_elem.inst, + .operand = select_cond_br.then_block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = extra.lhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + } }, + }); + select_cond_br.else_block = .init(select_cond_br.then_block.stealRemainingCapacity()); + _ = select_cond_br.else_block.add(l, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = res_elem.inst, + .operand = select_cond_br.else_block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = extra.rhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + } }, + }); + try select_cond_br.finish(l); + } + try res_elem.finish(l); + break :res_elem res_elem.inst.toRef(); + }, + }, + }), + } }, + }); + + var loop_cond_br: CondBr = .init(l, (try loop.block.addCmp( + l, + .lt, + cur_index_inst.toRef(), + try pt.intRef(.usize, res_len - 1), + .{}, + )).toRef(), &loop.block, .{}); + loop_cond_br.then_block = .init(loop.block.stealRemainingCapacity()); + { + _ = loop_cond_br.then_block.add(l, .{ + .tag = .store, + .data = .{ .bin_op = .{ + .lhs = index_alloc_inst.toRef(), + .rhs = loop_cond_br.then_block.add(l, .{ + .tag = .add, + .data = .{ .bin_op = .{ + .lhs = cur_index_inst.toRef(), + .rhs = .one_usize, + } }, + }).toRef(), + } }, + }); + _ = loop_cond_br.then_block.add(l, .{ + .tag = .repeat, + .data = .{ .repeat = .{ .loop_inst = loop.inst } }, + }); + } + loop_cond_br.else_block = .init(loop_cond_br.then_block.stealRemainingCapacity()); + _ = loop_cond_br.else_block.add(l, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = orig_inst, + .operand = loop_cond_br.else_block.add(l, .{ + .tag = .load, + .data = .{ .ty_op = .{ + .ty = Air.internedToRef(res_ty.toIntern()), + .operand = res_alloc_inst.toRef(), + } }, + }).toRef(), + } }, + }); + try loop_cond_br.finish(l); + } + try loop.finish(l); + } + return .{ .ty_pl = .{ + .ty = Air.internedToRef(res_ty.toIntern()), + .payload = try l.addBlockBody(res_block.body()), + } }; +} +fn scalarizeOverflowBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Error!Air.Inst.Data { + const pt = l.pt; + const zcu = pt.zcu; + + const orig = l.air_instructions.get(@intFromEnum(orig_inst)); + const res_ty = l.typeOfIndex(orig_inst); + const wrapped_res_ty = res_ty.fieldType(0, zcu); + const wrapped_res_scalar_ty = wrapped_res_ty.childType(zcu); + const res_len = wrapped_res_ty.vectorLen(zcu); + + var inst_buf: [21]Air.Inst.Index = undefined; + try 
l.air_instructions.ensureUnusedCapacity(zcu.gpa, inst_buf.len); + + var res_block: Block = .init(&inst_buf); + { + const res_alloc_inst = res_block.add(l, .{ + .tag = .alloc, + .data = .{ .ty = try pt.singleMutPtrType(res_ty) }, + }); + const ptr_wrapped_res_inst = res_block.add(l, .{ + .tag = .struct_field_ptr_index_0, + .data = .{ .ty_op = .{ + .ty = Air.internedToRef((try pt.singleMutPtrType(wrapped_res_ty)).toIntern()), + .operand = res_alloc_inst.toRef(), + } }, + }); + const ptr_overflow_res_inst = res_block.add(l, .{ + .tag = .struct_field_ptr_index_1, + .data = .{ .ty_op = .{ + .ty = Air.internedToRef((try pt.singleMutPtrType(res_ty.fieldType(1, zcu))).toIntern()), + .operand = res_alloc_inst.toRef(), + } }, + }); + const index_alloc_inst = res_block.add(l, .{ + .tag = .alloc, + .data = .{ .ty = .ptr_usize }, + }); + _ = res_block.add(l, .{ + .tag = .store, + .data = .{ .bin_op = .{ + .lhs = index_alloc_inst.toRef(), + .rhs = .zero_usize, + } }, + }); + + var loop: Loop = .init(l, &res_block); + loop.block = .init(res_block.stealRemainingCapacity()); + { + const cur_index_inst = loop.block.add(l, .{ + .tag = .load, + .data = .{ .ty_op = .{ + .ty = .usize_type, + .operand = index_alloc_inst.toRef(), + } }, + }); + const extra = l.extraData(Air.Bin, orig.data.ty_pl.payload).data; + const res_elem = loop.block.add(l, .{ + .tag = orig.tag, + .data = .{ .ty_pl = .{ + .ty = Air.internedToRef(try zcu.intern_pool.getTupleType(zcu.gpa, pt.tid, .{ + .types = &.{ wrapped_res_scalar_ty.toIntern(), .u1_type }, + .values = &(.{.none} ** 2), + })), + .payload = try l.addExtra(Air.Bin, .{ + .lhs = loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = extra.lhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + .rhs = loop.block.add(l, .{ + .tag = .array_elem_val, + .data = .{ .bin_op = .{ + .lhs = extra.rhs, + .rhs = cur_index_inst.toRef(), + } }, + }).toRef(), + }), + } }, + }); + _ = loop.block.add(l, .{ + .tag = .vector_store_elem, + .data = .{ .vector_store_elem = .{ + .vector_ptr = ptr_overflow_res_inst.toRef(), + .payload = try l.addExtra(Air.Bin, .{ + .lhs = cur_index_inst.toRef(), + .rhs = loop.block.add(l, .{ + .tag = .struct_field_val, + .data = .{ .ty_pl = .{ + .ty = .u1_type, + .payload = try l.addExtra(Air.StructField, .{ + .struct_operand = res_elem.toRef(), + .field_index = 1, + }), + } }, + }).toRef(), + }), + } }, + }); + _ = loop.block.add(l, .{ + .tag = .vector_store_elem, + .data = .{ .vector_store_elem = .{ + .vector_ptr = ptr_wrapped_res_inst.toRef(), + .payload = try l.addExtra(Air.Bin, .{ + .lhs = cur_index_inst.toRef(), + .rhs = loop.block.add(l, .{ + .tag = .struct_field_val, + .data = .{ .ty_pl = .{ + .ty = Air.internedToRef(wrapped_res_scalar_ty.toIntern()), + .payload = try l.addExtra(Air.StructField, .{ + .struct_operand = res_elem.toRef(), + .field_index = 0, + }), + } }, + }).toRef(), + }), + } }, + }); + + var loop_cond_br: CondBr = .init(l, (try loop.block.addCmp( + l, + .lt, + cur_index_inst.toRef(), + try pt.intRef(.usize, res_len - 1), + .{}, + )).toRef(), &loop.block, .{}); + loop_cond_br.then_block = .init(loop.block.stealRemainingCapacity()); + { + _ = loop_cond_br.then_block.add(l, .{ + .tag = .store, + .data = .{ .bin_op = .{ + .lhs = index_alloc_inst.toRef(), + .rhs = loop_cond_br.then_block.add(l, .{ + .tag = .add, + .data = .{ .bin_op = .{ + .lhs = cur_index_inst.toRef(), + .rhs = .one_usize, + } }, + }).toRef(), + } }, + }); + _ = loop_cond_br.then_block.add(l, .{ + .tag = .repeat, + .data = .{ .repeat = .{ .loop_inst 
= loop.inst } }, + }); + } + loop_cond_br.else_block = .init(loop_cond_br.then_block.stealRemainingCapacity()); + _ = loop_cond_br.else_block.add(l, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = orig_inst, + .operand = loop_cond_br.else_block.add(l, .{ + .tag = .load, + .data = .{ .ty_op = .{ + .ty = Air.internedToRef(res_ty.toIntern()), + .operand = res_alloc_inst.toRef(), + } }, + }).toRef(), + } }, + }); + try loop_cond_br.finish(l); + } + try loop.finish(l); + } + return .{ .ty_pl = .{ + .ty = Air.internedToRef(res_ty.toIntern()), + .payload = try l.addBlockBody(res_block.body()), + } }; +} + +fn safeIntcastBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Error!Air.Inst.Data { + const pt = l.pt; + const zcu = pt.zcu; + const ty_op = l.air_instructions.items(.data)[@intFromEnum(orig_inst)].ty_op; + + const operand_ref = ty_op.operand; + const operand_ty = l.typeOf(operand_ref); + const dest_ty = ty_op.ty.toType(); + + const is_vector = operand_ty.zigTypeTag(zcu) == .vector; + const operand_scalar_ty = operand_ty.scalarType(zcu); + const dest_scalar_ty = dest_ty.scalarType(zcu); + + assert(operand_scalar_ty.zigTypeTag(zcu) == .int); + const dest_is_enum = switch (dest_scalar_ty.zigTypeTag(zcu)) { + .int => false, + .@"enum" => true, + else => unreachable, + }; + + const operand_info = operand_scalar_ty.intInfo(zcu); + const dest_info = dest_scalar_ty.intInfo(zcu); + + const have_min_check, const have_max_check = c: { + const dest_pos_bits = dest_info.bits - @intFromBool(dest_info.signedness == .signed); + const operand_pos_bits = operand_info.bits - @intFromBool(operand_info.signedness == .signed); + const dest_allows_neg = dest_info.signedness == .signed and dest_info.bits > 0; + const operand_allows_neg = operand_info.signedness == .signed and operand_info.bits > 0; + break :c .{ + operand_allows_neg and (!dest_allows_neg or dest_info.bits < operand_info.bits), + dest_pos_bits < operand_pos_bits, + }; + }; + + // The worst-case scenario in terms of total instructions and total condbrs is the case where + // the result type is an exhaustive enum whose tag type is smaller than the operand type: + // + // %x = block({ + // %1 = cmp_lt(%y, @min_allowed_int) + // %2 = cmp_gt(%y, @max_allowed_int) + // %3 = bool_or(%1, %2) + // %4 = cond_br(%3, { + // %5 = call(@panic.invalidEnumValue, []) + // %6 = unreach() + // }, { + // %7 = intcast(@res_ty, %y) + // %8 = is_named_enum_value(%7) + // %9 = cond_br(%8, { + // %10 = br(%x, %7) + // }, { + // %11 = call(@panic.invalidEnumValue, []) + // %12 = unreach() + // }) + // }) + // }) + // + // Note that vectors of enums don't exist -- the worst case for vectors is this: + // + // %x = block({ + // %1 = cmp_lt(%y, @min_allowed_int) + // %2 = cmp_gt(%y, @max_allowed_int) + // %3 = bool_or(%1, %2) + // %4 = reduce(%3, .@"or") + // %5 = cond_br(%4, { + // %6 = call(@panic.invalidEnumValue, []) + // %7 = unreach() + // }, { + // %8 = intcast(@res_ty, %y) + // %9 = br(%x, %8) + // }) + // }) + + var inst_buf: [12]Air.Inst.Index = undefined; + try l.air_instructions.ensureUnusedCapacity(zcu.gpa, inst_buf.len); + var condbr_buf: [2]CondBr = undefined; + var condbr_idx: usize = 0; + + var main_block: Block = .init(&inst_buf); + var cur_block: *Block = &main_block; + + const panic_id: Zcu.SimplePanicId = if (dest_is_enum) .invalid_enum_value else .integer_out_of_bounds; + + if (have_min_check or have_max_check) { + const dest_int_ty = if (dest_is_enum) dest_ty.intTagType(zcu) else dest_ty; + const condbr = &condbr_buf[condbr_idx]; + condbr_idx 
+= 1; + const below_min_inst: Air.Inst.Index = if (have_min_check) inst: { + const min_val_ref = Air.internedToRef((try dest_int_ty.minInt(pt, operand_ty)).toIntern()); + break :inst try cur_block.addCmp(l, .lt, operand_ref, min_val_ref, .{ .vector = is_vector }); + } else undefined; + const above_max_inst: Air.Inst.Index = if (have_max_check) inst: { + const max_val_ref = Air.internedToRef((try dest_int_ty.maxInt(pt, operand_ty)).toIntern()); + break :inst try cur_block.addCmp(l, .gt, operand_ref, max_val_ref, .{ .vector = is_vector }); + } else undefined; + const out_of_range_inst: Air.Inst.Index = inst: { + if (have_min_check and have_max_check) break :inst cur_block.add(l, .{ + .tag = .bool_or, + .data = .{ .bin_op = .{ + .lhs = below_min_inst.toRef(), + .rhs = above_max_inst.toRef(), + } }, + }); + if (have_min_check) break :inst below_min_inst; + if (have_max_check) break :inst above_max_inst; + unreachable; + }; + const scalar_out_of_range_inst: Air.Inst.Index = if (is_vector) cur_block.add(l, .{ + .tag = .reduce, + .data = .{ .reduce = .{ + .operand = out_of_range_inst.toRef(), + .operation = .Or, + } }, + }) else out_of_range_inst; + condbr.* = .init(l, scalar_out_of_range_inst.toRef(), cur_block, .{ .true = .cold }); + condbr.then_block = .init(cur_block.stealRemainingCapacity()); + try condbr.then_block.addPanic(l, panic_id); + condbr.else_block = .init(condbr.then_block.stealRemainingCapacity()); + cur_block = &condbr.else_block; + } + + // Now we know we're in-range, we can intcast: + const cast_inst = cur_block.add(l, .{ + .tag = .intcast, + .data = .{ .ty_op = .{ + .ty = Air.internedToRef(dest_ty.toIntern()), + .operand = operand_ref, + } }, + }); + // For ints we're already done, but for exhaustive enums we must check this is a valid tag. + if (dest_is_enum and !dest_ty.isNonexhaustiveEnum(zcu) and zcu.backendSupportsFeature(.is_named_enum_value)) { + assert(!is_vector); // vectors of enums don't exist + // We are building this: + // %1 = is_named_enum_value(%cast_inst) + // %2 = cond_br(%1, { + // + // }, { + // + // }) + const is_named_inst = cur_block.add(l, .{ + .tag = .is_named_enum_value, + .data = .{ .un_op = cast_inst.toRef() }, + }); + const condbr = &condbr_buf[condbr_idx]; + condbr_idx += 1; + condbr.* = .init(l, is_named_inst.toRef(), cur_block, .{ .false = .cold }); + condbr.else_block = .init(cur_block.stealRemainingCapacity()); + try condbr.else_block.addPanic(l, panic_id); + condbr.then_block = .init(condbr.else_block.stealRemainingCapacity()); + cur_block = &condbr.then_block; + } + // Finally, just `br` to our outer `block`. + _ = cur_block.add(l, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = orig_inst, + .operand = cast_inst.toRef(), + } }, + }); + // We might not have used all of the instructions; that's intentional. 
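+    // In source-level terms, the block built above is roughly equivalent to the
+    // following (an illustrative sketch only; `min_allowed` and `max_allowed` stand
+    // for the destination type's bounds expressed in the operand type, and checks
+    // that statically cannot fail are simply not emitted):
+    //
+    //     if (x < min_allowed or x > max_allowed) @panic("integer does not fit in destination type");
+    //     const result: Dest = @intCast(x);
+    //     // exhaustive enum destinations additionally panic unless `result` is a named tag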
+ _ = cur_block.stealRemainingCapacity(); + + for (condbr_buf[0..condbr_idx]) |*condbr| try condbr.finish(l); + return .{ .ty_pl = .{ + .ty = Air.internedToRef(dest_ty.toIntern()), + .payload = try l.addBlockBody(main_block.body()), + } }; +} +fn safeArithmeticBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, overflow_op_tag: Air.Inst.Tag) Error!Air.Inst.Data { + const pt = l.pt; + const zcu = pt.zcu; + const bin_op = l.air_instructions.items(.data)[@intFromEnum(orig_inst)].bin_op; + + const operand_ty = l.typeOf(bin_op.lhs); + assert(l.typeOf(bin_op.rhs).toIntern() == operand_ty.toIntern()); + const is_vector = operand_ty.zigTypeTag(zcu) == .vector; + + const overflow_tuple_ty = try pt.overflowArithmeticTupleType(operand_ty); + const overflow_bits_ty = overflow_tuple_ty.fieldType(1, zcu); + + // The worst-case scenario is a vector operand: + // + // %1 = add_with_overflow(%x, %y) + // %2 = struct_field_val(%1, .@"1") + // %3 = reduce(%2, .@"or") + // %4 = bitcast(%3, @bool_type) + // %5 = cond_br(%4, { + // %6 = call(@panic.integerOverflow, []) + // %7 = unreach() + // }, { + // %8 = struct_field_val(%1, .@"0") + // %9 = br(%z, %8) + // }) + var inst_buf: [9]Air.Inst.Index = undefined; + try l.air_instructions.ensureUnusedCapacity(zcu.gpa, inst_buf.len); + + var main_block: Block = .init(&inst_buf); + + const overflow_op_inst = main_block.add(l, .{ + .tag = overflow_op_tag, + .data = .{ .ty_pl = .{ + .ty = Air.internedToRef(overflow_tuple_ty.toIntern()), + .payload = try l.addExtra(Air.Bin, .{ + .lhs = bin_op.lhs, + .rhs = bin_op.rhs, + }), + } }, + }); + const overflow_bits_inst = main_block.add(l, .{ + .tag = .struct_field_val, + .data = .{ .ty_pl = .{ + .ty = Air.internedToRef(overflow_bits_ty.toIntern()), + .payload = try l.addExtra(Air.StructField, .{ + .struct_operand = overflow_op_inst.toRef(), + .field_index = 1, + }), + } }, + }); + const any_overflow_bit_inst = if (is_vector) main_block.add(l, .{ + .tag = .reduce, + .data = .{ .reduce = .{ + .operand = overflow_bits_inst.toRef(), + .operation = .Or, + } }, + }) else overflow_bits_inst; + const any_overflow_inst = try main_block.addCmp(l, .eq, any_overflow_bit_inst.toRef(), .one_u1, .{}); + + var condbr: CondBr = .init(l, any_overflow_inst.toRef(), &main_block, .{ .true = .cold }); + condbr.then_block = .init(main_block.stealRemainingCapacity()); + try condbr.then_block.addPanic(l, .integer_overflow); + condbr.else_block = .init(condbr.then_block.stealRemainingCapacity()); + + const result_inst = condbr.else_block.add(l, .{ + .tag = .struct_field_val, + .data = .{ .ty_pl = .{ + .ty = Air.internedToRef(operand_ty.toIntern()), + .payload = try l.addExtra(Air.StructField, .{ + .struct_operand = overflow_op_inst.toRef(), + .field_index = 0, + }), + } }, + }); + _ = condbr.else_block.add(l, .{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = orig_inst, + .operand = result_inst.toRef(), + } }, + }); + // We might not have used all of the instructions; that's intentional. 
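+    // In source-level terms, this lowering is roughly (illustrative sketch only):
+    //
+    //     const res = @addWithOverflow(lhs, rhs); // or @subWithOverflow / @mulWithOverflow
+    //     const overflowed = if (is_vector) @reduce(.Or, res[1]) else res[1];
+    //     if (overflowed == 1) @panic("integer overflow");
+    //     break :blk res[0];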
+    _ = condbr.else_block.stealRemainingCapacity();
+
+    try condbr.finish(l);
+    return .{ .ty_pl = .{
+        .ty = Air.internedToRef(operand_ty.toIntern()),
+        .payload = try l.addBlockBody(main_block.body()),
+    } };
+}
+
+const Block = struct {
+    instructions: []Air.Inst.Index,
+    len: usize,
+
+    /// There are two common usages of the API:
+    /// * `buf.len` is exactly the number of instructions which will be in this block
+    /// * `buf.len` is no smaller than necessary, and `b.stealRemainingCapacity` will be used
+    fn init(buf: []Air.Inst.Index) Block {
+        return .{
+            .instructions = buf,
+            .len = 0,
+        };
+    }
+
+    /// Like `Legalize.addInstAssumeCapacity`, but also appends the instruction to `b`.
+    fn add(b: *Block, l: *Legalize, inst_data: Air.Inst) Air.Inst.Index {
+        const inst = l.addInstAssumeCapacity(inst_data);
+        b.instructions[b.len] = inst;
+        b.len += 1;
+        return inst;
+    }
+
+    /// Adds the code to call the panic handler `panic_id`. This is usually `.call` then `.unreach`,
+    /// but if `Zcu.Feature.panic_fn` is unsupported, we lower to `.trap` instead.
+    fn addPanic(b: *Block, l: *Legalize, panic_id: Zcu.SimplePanicId) Error!void {
+        const zcu = l.pt.zcu;
+        if (!zcu.backendSupportsFeature(.panic_fn)) {
+            _ = b.add(l, .{
+                .tag = .trap,
+                .data = .{ .no_op = {} },
+            });
+            return;
+        }
+        const panic_fn_val = zcu.builtin_decl_values.get(panic_id.toBuiltin());
+        _ = b.add(l, .{
+            .tag = .call,
+            .data = .{ .pl_op = .{
+                .operand = Air.internedToRef(panic_fn_val),
+                .payload = try l.addExtra(Air.Call, .{ .args_len = 0 }),
+            } },
+        });
+        _ = b.add(l, .{
+            .tag = .unreach,
+            .data = .{ .no_op = {} },
+        });
+    }
+
+    /// Adds a `cmp_*` instruction (possibly `cmp_vector`) to `b`. This is a fairly thin wrapper
+    /// around `add`, although it does compute the result type if `is_vector` (`@Vector(n, bool)`).
+    fn addCmp(
+        b: *Block,
+        l: *Legalize,
+        op: std.math.CompareOperator,
+        lhs: Air.Inst.Ref,
+        rhs: Air.Inst.Ref,
+        opts: struct { optimized: bool = false, vector: bool = false },
+    ) Error!Air.Inst.Index {
+        const pt = l.pt;
+        if (opts.vector) {
+            const bool_vec_ty = try pt.vectorType(.{
+                .child = .bool_type,
+                .len = l.typeOf(lhs).vectorLen(pt.zcu),
+            });
+            return b.add(l, .{
+                .tag = if (opts.optimized) .cmp_vector_optimized else .cmp_vector,
+                .data = .{ .ty_pl = .{
+                    .ty = Air.internedToRef(bool_vec_ty.toIntern()),
+                    .payload = try l.addExtra(Air.VectorCmp, .{
+                        .lhs = lhs,
+                        .rhs = rhs,
+                        .op = Air.VectorCmp.encodeOp(op),
+                    }),
+                } },
+            });
+        }
+        return b.add(l, .{
+            .tag = switch (op) {
+                .lt => if (opts.optimized) .cmp_lt_optimized else .cmp_lt,
+                .lte => if (opts.optimized) .cmp_lte_optimized else .cmp_lte,
+                .eq => if (opts.optimized) .cmp_eq_optimized else .cmp_eq,
+                .gte => if (opts.optimized) .cmp_gte_optimized else .cmp_gte,
+                .gt => if (opts.optimized) .cmp_gt_optimized else .cmp_gt,
+                .neq => if (opts.optimized) .cmp_neq_optimized else .cmp_neq,
+            },
+            .data = .{ .bin_op = .{
+                .lhs = lhs,
+                .rhs = rhs,
+            } },
+        });
+    }
+
+    /// Returns the unused capacity of `b.instructions`, and shrinks `b.instructions` down to `b.len`.
+    /// This is useful when you've provided a buffer big enough for all your instructions, but you are
+    /// now starting a new block and some of them need to live there instead.
+    fn stealRemainingCapacity(b: *Block) []Air.Inst.Index {
+        return b.stealFrom(b.len);
+    }
+
+    /// Returns `len` elements taken from the unused capacity of `b.instructions`, and shrinks
+    /// `b.instructions` down to not include them anymore.
+    /// This is useful when you've provided a buffer big enough for all your instructions, but you are
+    /// now starting a new block and some of them need to live there instead.
+    fn stealCapacity(b: *Block, len: usize) []Air.Inst.Index {
+        return b.stealFrom(b.instructions.len - len);
+    }
+
+    fn stealFrom(b: *Block, start: usize) []Air.Inst.Index {
+        assert(start >= b.len);
+        defer b.instructions.len = start;
+        return b.instructions[start..];
+    }
+
+    fn body(b: *const Block) []const Air.Inst.Index {
+        assert(b.len == b.instructions.len);
+        return b.instructions;
+    }
+};
+
+const Result = struct {
+    inst: Air.Inst.Index,
+    block: Block,
+
+    /// The return value has `block` initialized to `undefined`; it is the caller's responsibility
+    /// to initialize it.
+    fn init(l: *Legalize, ty: Type, parent_block: *Block) Result {
+        return .{
+            .inst = parent_block.add(l, .{
+                .tag = .block,
+                .data = .{ .ty_pl = .{
+                    .ty = Air.internedToRef(ty.toIntern()),
+                    .payload = undefined,
+                } },
+            }),
+            .block = undefined,
+        };
+    }
+
+    fn finish(res: Result, l: *Legalize) Error!void {
+        const data = &l.air_instructions.items(.data)[@intFromEnum(res.inst)];
+        data.ty_pl.payload = try l.addBlockBody(res.block.body());
+    }
+};
+
+const Loop = struct {
+    inst: Air.Inst.Index,
+    block: Block,
+
+    /// The return value has `block` initialized to `undefined`; it is the caller's responsibility
+    /// to initialize it.
+    fn init(l: *Legalize, parent_block: *Block) Loop {
+        return .{
+            .inst = parent_block.add(l, .{
+                .tag = .loop,
+                .data = .{ .ty_pl = .{
+                    .ty = .noreturn_type,
+                    .payload = undefined,
+                } },
+            }),
+            .block = undefined,
+        };
+    }
+
+    fn finish(loop: Loop, l: *Legalize) Error!void {
+        const data = &l.air_instructions.items(.data)[@intFromEnum(loop.inst)];
+        data.ty_pl.payload = try l.addBlockBody(loop.block.body());
+    }
+};
+
+const CondBr = struct {
+    inst: Air.Inst.Index,
+    hints: Air.CondBr.BranchHints,
+    then_block: Block,
+    else_block: Block,
+
+    /// The return value has `then_block` and `else_block` initialized to `undefined`; it is the
+    /// caller's responsibility to initialize them.
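+    ///
+    /// Rough shape of a call site (an illustrative sketch mirroring the uses above,
+    /// not a prescribed API; `cond_ref` and `parent_block` are placeholder names):
+    ///
+    ///     var cond_br: CondBr = .init(l, cond_ref, &parent_block, .{});
+    ///     cond_br.then_block = .init(parent_block.stealRemainingCapacity());
+    ///     // ... add instructions to cond_br.then_block ...
+    ///     cond_br.else_block = .init(cond_br.then_block.stealRemainingCapacity());
+    ///     // ... add instructions to cond_br.else_block ...
+    ///     try cond_br.finish(l);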
+    fn init(l: *Legalize, operand: Air.Inst.Ref, parent_block: *Block, hints: Air.CondBr.BranchHints) CondBr {
+        return .{
+            .inst = parent_block.add(l, .{
+                .tag = .cond_br,
+                .data = .{ .pl_op = .{
+                    .operand = operand,
+                    .payload = undefined,
+                } },
+            }),
+            .hints = hints,
+            .then_block = undefined,
+            .else_block = undefined,
+        };
+    }
+
+    fn finish(cond_br: CondBr, l: *Legalize) Error!void {
+        const then_body = cond_br.then_block.body();
+        const else_body = cond_br.else_block.body();
+        try l.air_extra.ensureUnusedCapacity(l.pt.zcu.gpa, 3 + then_body.len + else_body.len);
+
+        const data = &l.air_instructions.items(.data)[@intFromEnum(cond_br.inst)];
+        data.pl_op.payload = @intCast(l.air_extra.items.len);
+        l.air_extra.appendSliceAssumeCapacity(&.{
+            @intCast(then_body.len),
+            @intCast(else_body.len),
+            @bitCast(cond_br.hints),
+        });
+        l.air_extra.appendSliceAssumeCapacity(@ptrCast(then_body));
+        l.air_extra.appendSliceAssumeCapacity(@ptrCast(else_body));
+    }
+};
+
+fn addInstAssumeCapacity(l: *Legalize, inst: Air.Inst) Air.Inst.Index {
+    defer l.air_instructions.appendAssumeCapacity(inst);
+    return @enumFromInt(l.air_instructions.len);
+}
+
+fn addExtra(l: *Legalize, comptime Extra: type, extra: Extra) Error!u32 {
+    const extra_fields = @typeInfo(Extra).@"struct".fields;
+    try l.air_extra.ensureUnusedCapacity(l.pt.zcu.gpa, extra_fields.len);
+    defer inline for (extra_fields) |field| l.air_extra.appendAssumeCapacity(switch (field.type) {
+        u32 => @field(extra, field.name),
+        Air.Inst.Ref => @intFromEnum(@field(extra, field.name)),
+        else => @compileError(@typeName(field.type)),
+    });
+    return @intCast(l.air_extra.items.len);
+}
+
+fn addBlockBody(l: *Legalize, body: []const Air.Inst.Index) Error!u32 {
+    try l.air_extra.ensureUnusedCapacity(l.pt.zcu.gpa, 1 + body.len);
+    defer {
+        l.air_extra.appendAssumeCapacity(@intCast(body.len));
+        l.air_extra.appendSliceAssumeCapacity(@ptrCast(body));
+    }
+    return @intCast(l.air_extra.items.len);
+}
+
+/// Returns `tag` to remind the caller to `continue :inst` the result.
+/// This is inline to propagate the comptime-known `tag`.
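+///
+/// Illustrative call-site shape (hypothetical tag name; a hedged sketch of how the
+/// `continue :inst` pattern is meant to be used):
+///
+///     inst: switch (l.air_instructions.items(.tag)[@intFromEnum(inst)]) {
+///         .some_vector_op => continue :inst try l.scalarize(inst, .bin_op),
+///         else => {},
+///     }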
+inline fn replaceInst(l: *Legalize, inst: Air.Inst.Index, comptime tag: Air.Inst.Tag, data: Air.Inst.Data) Air.Inst.Tag { + const orig_ty = if (std.debug.runtime_safety) l.typeOfIndex(inst) else {}; + l.air_instructions.set(@intFromEnum(inst), .{ .tag = tag, .data = data }); + if (std.debug.runtime_safety) assert(l.typeOfIndex(inst).toIntern() == orig_ty.toIntern()); return tag; } const Air = @import("../Air.zig"); -const codegen = @import("../codegen.zig"); +const assert = std.debug.assert; +const dev = @import("../dev.zig"); +const InternPool = @import("../InternPool.zig"); const Legalize = @This(); const std = @import("std"); +const Type = @import("../Type.zig"); const Zcu = @import("../Zcu.zig"); diff --git a/src/Air/Liveness.zig b/src/Air/Liveness.zig index 34ecde26e2..7acba48ed0 100644 --- a/src/Air/Liveness.zig +++ b/src/Air/Liveness.zig @@ -15,6 +15,7 @@ const Liveness = @This(); const trace = @import("../tracy.zig").trace; const Air = @import("../Air.zig"); const InternPool = @import("../InternPool.zig"); +const Zcu = @import("../Zcu.zig"); pub const Verify = @import("Liveness/Verify.zig"); @@ -136,12 +137,15 @@ fn LivenessPassData(comptime pass: LivenessPass) type { }; } -pub fn analyze(gpa: Allocator, air: Air, intern_pool: *InternPool) Allocator.Error!Liveness { +pub fn analyze(zcu: *Zcu, air: Air, intern_pool: *InternPool) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); + const gpa = zcu.gpa; + var a: Analysis = .{ .gpa = gpa, + .zcu = zcu, .air = air, .tomb_bits = try gpa.alloc( usize, @@ -220,6 +224,7 @@ const OperandCategory = enum { pub fn categorizeOperand( l: Liveness, air: Air, + zcu: *Zcu, inst: Air.Inst.Index, operand: Air.Inst.Index, ip: *const InternPool, @@ -511,10 +516,15 @@ pub fn categorizeOperand( if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none); return .none; }, - .shuffle => { - const extra = air.extraData(Air.Shuffle, air_datas[@intFromEnum(inst)].ty_pl.payload).data; - if (extra.a == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); - if (extra.b == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none); + .shuffle_one => { + const unwrapped = air.unwrapShuffleOne(zcu, inst); + if (unwrapped.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); + return .none; + }, + .shuffle_two => { + const unwrapped = air.unwrapShuffleTwo(zcu, inst); + if (unwrapped.operand_a == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); + if (unwrapped.operand_b == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none); return .none; }, .reduce, .reduce_optimized => { @@ -639,7 +649,7 @@ pub fn categorizeOperand( var operand_live: bool = true; for (&[_]Air.Inst.Index{ then_body[0], else_body[0] }) |cond_inst| { - if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb) + if (l.categorizeOperand(air, zcu, cond_inst, operand, ip) == .tomb) operand_live = false; switch (air_tags[@intFromEnum(cond_inst)]) { @@ -824,6 +834,7 @@ pub const BigTomb = struct { /// In-progress data; on successful analysis converted into `Liveness`. 
const Analysis = struct { gpa: Allocator, + zcu: *Zcu, air: Air, intern_pool: *InternPool, tomb_bits: []usize, @@ -1119,9 +1130,13 @@ fn analyzeInst( const extra = a.air.extraData(Air.Bin, pl_op.payload).data; return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, extra.lhs, extra.rhs }); }, - .shuffle => { - const extra = a.air.extraData(Air.Shuffle, inst_datas[@intFromEnum(inst)].ty_pl.payload).data; - return analyzeOperands(a, pass, data, inst, .{ extra.a, extra.b, .none }); + .shuffle_one => { + const unwrapped = a.air.unwrapShuffleOne(a.zcu, inst); + return analyzeOperands(a, pass, data, inst, .{ unwrapped.operand, .none, .none }); + }, + .shuffle_two => { + const unwrapped = a.air.unwrapShuffleTwo(a.zcu, inst); + return analyzeOperands(a, pass, data, inst, .{ unwrapped.operand_a, unwrapped.operand_b, .none }); }, .reduce, .reduce_optimized => { const reduce = inst_datas[@intFromEnum(inst)].reduce; diff --git a/src/Air/Liveness/Verify.zig b/src/Air/Liveness/Verify.zig index e7ed37956d..4ad24cf924 100644 --- a/src/Air/Liveness/Verify.zig +++ b/src/Air/Liveness/Verify.zig @@ -1,6 +1,7 @@ //! Verifies that Liveness information is valid. gpa: std.mem.Allocator, +zcu: *Zcu, air: Air, liveness: Liveness, live: LiveMap = .{}, @@ -287,10 +288,13 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none }); }, - .shuffle => { - const ty_pl = data[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; - try self.verifyInstOperands(inst, .{ extra.a, extra.b, .none }); + .shuffle_one => { + const unwrapped = self.air.unwrapShuffleOne(self.zcu, inst); + try self.verifyInstOperands(inst, .{ unwrapped.operand, .none, .none }); + }, + .shuffle_two => { + const unwrapped = self.air.unwrapShuffleTwo(self.zcu, inst); + try self.verifyInstOperands(inst, .{ unwrapped.operand_a, unwrapped.operand_b, .none }); }, .cmp_vector, .cmp_vector_optimized, @@ -639,4 +643,5 @@ const log = std.log.scoped(.liveness_verify); const Air = @import("../../Air.zig"); const Liveness = @import("../Liveness.zig"); const InternPool = @import("../../InternPool.zig"); +const Zcu = @import("../../Zcu.zig"); const Verify = @This(); diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig index 873f70ec50..eb17402ebe 100644 --- a/src/Air/types_resolved.zig +++ b/src/Air/types_resolved.zig @@ -249,12 +249,22 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool { if (!checkRef(extra.struct_operand, zcu)) return false; }, - .shuffle => { - const extra = air.extraData(Air.Shuffle, data.ty_pl.payload).data; - if (!checkType(data.ty_pl.ty.toType(), zcu)) return false; - if (!checkRef(extra.a, zcu)) return false; - if (!checkRef(extra.b, zcu)) return false; - if (!checkVal(Value.fromInterned(extra.mask), zcu)) return false; + .shuffle_one => { + const unwrapped = air.unwrapShuffleOne(zcu, inst); + if (!checkType(unwrapped.result_ty, zcu)) return false; + if (!checkRef(unwrapped.operand, zcu)) return false; + for (unwrapped.mask) |m| switch (m.unwrap()) { + .elem => {}, + .value => |val| if (!checkVal(.fromInterned(val), zcu)) return false, + }; + }, + + .shuffle_two => { + const unwrapped = air.unwrapShuffleTwo(zcu, inst); + if (!checkType(unwrapped.result_ty, zcu)) return false; + if (!checkRef(unwrapped.operand_a, zcu)) return false; + if (!checkRef(unwrapped.operand_b, zcu)) return false; + // No values to check 
because there are no comptime-known values other than undef }, .cmpxchg_weak, diff --git a/src/Compilation.zig b/src/Compilation.zig index 0e74cff502..cb89688b9b 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2529,6 +2529,7 @@ pub fn destroy(comp: *Compilation) void { pub fn clearMiscFailures(comp: *Compilation) void { comp.alloc_failure_occurred = false; + comp.link_diags.flags = .{}; for (comp.misc_failures.values()) |*value| { value.deinit(comp.gpa); } @@ -2795,7 +2796,6 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { if (anyErrors(comp)) { // Skip flushing and keep source files loaded for error reporting. - comp.link_diags.flags = .{}; return; } diff --git a/src/InternPool.zig b/src/InternPool.zig index f5fb33ede9..44f19e3e29 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4579,10 +4579,11 @@ pub const Index = enum(u32) { undefined_type, enum_literal_type, + ptr_usize_type, + ptr_const_comptime_int_type, manyptr_u8_type, manyptr_const_u8_type, manyptr_const_u8_sentinel_0_type, - single_const_pointer_to_comptime_int_type, slice_const_u8_type, slice_const_u8_sentinel_0_type, @@ -4649,19 +4650,29 @@ pub const Index = enum(u32) { /// `undefined` (untyped) undef, + /// `@as(bool, undefined)` + undef_bool, + /// `@as(usize, undefined)` + undef_usize, + /// `@as(u1, undefined)` + undef_u1, /// `0` (comptime_int) zero, - /// `0` (usize) + /// `@as(usize, 0)` zero_usize, - /// `0` (u8) + /// `@as(u1, 0)` + zero_u1, + /// `@as(u8, 0)` zero_u8, /// `1` (comptime_int) one, - /// `1` (usize) + /// `@as(usize, 1)` one_usize, - /// `1` (u8) + /// `@as(u1, 1)` + one_u1, + /// `@as(u8, 1)` one_u8, - /// `4` (u8) + /// `@as(u8, 4)` four_u8, /// `-1` (comptime_int) negative_one, @@ -5074,6 +5085,20 @@ pub const static_keys: [static_len]Key = .{ .{ .simple_type = .undefined }, .{ .simple_type = .enum_literal }, + // *usize + .{ .ptr_type = .{ + .child = .usize_type, + .flags = .{}, + } }, + + // *const comptime_int + .{ .ptr_type = .{ + .child = .comptime_int_type, + .flags = .{ + .is_const = true, + }, + } }, + // [*]u8 .{ .ptr_type = .{ .child = .u8_type, @@ -5101,15 +5126,6 @@ pub const static_keys: [static_len]Key = .{ }, } }, - // *const comptime_int - .{ .ptr_type = .{ - .child = .comptime_int_type, - .flags = .{ - .size = .one, - .is_const = true, - }, - } }, - // []const u8 .{ .ptr_type = .{ .child = .u8_type, @@ -5245,6 +5261,9 @@ pub const static_keys: [static_len]Key = .{ } }, .{ .simple_value = .undefined }, + .{ .undef = .bool_type }, + .{ .undef = .usize_type }, + .{ .undef = .u1_type }, .{ .int = .{ .ty = .comptime_int_type, @@ -5256,6 +5275,11 @@ pub const static_keys: [static_len]Key = .{ .storage = .{ .u64 = 0 }, } }, + .{ .int = .{ + .ty = .u1_type, + .storage = .{ .u64 = 0 }, + } }, + .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = 0 }, @@ -5271,17 +5295,21 @@ pub const static_keys: [static_len]Key = .{ .storage = .{ .u64 = 1 }, } }, - // one_u8 + .{ .int = .{ + .ty = .u1_type, + .storage = .{ .u64 = 1 }, + } }, + .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = 1 }, } }, - // four_u8 + .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = 4 }, } }, - // negative_one + .{ .int = .{ .ty = .comptime_int_type, .storage = .{ .i64 = -1 }, @@ -10482,7 +10510,7 @@ pub fn getCoerced( .base_addr = .int, .byte_offset = 0, } }), - .len = try ip.get(gpa, tid, .{ .undef = .usize_type }), + .len = .undef_usize, } }), }; }, @@ -10601,7 +10629,7 @@ pub fn getCoerced( .base_addr = .int, .byte_offset = 0, } }), - .len = try ip.get(gpa, tid, 
.{ .undef = .usize_type }), + .len = .undef_usize, } }), }, else => |payload| try ip.getCoerced(gpa, tid, payload, new_ty), @@ -11847,10 +11875,11 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .null_type, .undefined_type, .enum_literal_type, + .ptr_usize_type, + .ptr_const_comptime_int_type, .manyptr_u8_type, .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, - .single_const_pointer_to_comptime_int_type, .slice_const_u8_type, .slice_const_u8_sentinel_0_type, .vector_8_i8_type, @@ -11909,12 +11938,13 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .undef => .undefined_type, .zero, .one, .negative_one => .comptime_int_type, - .zero_usize, .one_usize => .usize_type, + .undef_usize, .zero_usize, .one_usize => .usize_type, + .undef_u1, .zero_u1, .one_u1 => .u1_type, .zero_u8, .one_u8, .four_u8 => .u8_type, .void_value => .void_type, .unreachable_value => .noreturn_type, .null_value => .null_type, - .bool_true, .bool_false => .bool_type, + .undef_bool, .bool_true, .bool_false => .bool_type, .empty_tuple => .empty_tuple_type, // This optimization on tags is needed so that indexToKey can call @@ -12186,10 +12216,11 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId { .undefined_type => .undefined, .enum_literal_type => .enum_literal, + .ptr_usize_type, + .ptr_const_comptime_int_type, .manyptr_u8_type, .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, - .single_const_pointer_to_comptime_int_type, .slice_const_u8_type, .slice_const_u8_sentinel_0_type, => .pointer, @@ -12251,11 +12282,16 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId { // values, not types .undef => unreachable, + .undef_bool => unreachable, + .undef_usize => unreachable, + .undef_u1 => unreachable, .zero => unreachable, .zero_usize => unreachable, + .zero_u1 => unreachable, .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, + .one_u1 => unreachable, .one_u8 => unreachable, .four_u8 => unreachable, .negative_one => unreachable, diff --git a/src/Sema.zig b/src/Sema.zig index 3fa264be48..f051a62af3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1881,7 +1881,7 @@ fn analyzeBodyInner( extra.data.else_body_len, ); const uncasted_cond = try sema.resolveInst(extra.data.condition); - const cond = try sema.coerce(block, Type.bool, uncasted_cond, cond_src); + const cond = try sema.coerce(block, .bool, uncasted_cond, cond_src); const cond_val = try sema.resolveConstDefinedValue( block, cond_src, @@ -2012,7 +2012,7 @@ fn resolveConstBool( reason: ComptimeReason, ) !bool { const air_inst = try sema.resolveInst(zir_ref); - const wanted_type = Type.bool; + const wanted_type: Type = .bool; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason); return val.toBool(); @@ -2037,7 +2037,7 @@ pub fn toConstString( reason: ComptimeReason, ) ![]u8 { const pt = sema.pt; - const coerced_inst = try sema.coerce(block, Type.slice_const_u8, air_inst, src); + const coerced_inst = try sema.coerce(block, .slice_const_u8, air_inst, src); const slice_val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason); const arr_val = try sema.derefSliceAsArray(block, src, slice_val, reason); return arr_val.toAllocatedBytes(arr_val.typeOf(pt.zcu), sema.arena, pt); @@ -2051,7 +2051,7 @@ pub fn resolveConstStringIntern( reason: ComptimeReason, ) !InternPool.NullTerminatedString { const air_inst = try sema.resolveInst(zir_ref); - const 
wanted_type = Type.slice_const_u8; + const wanted_type: Type = .slice_const_u8; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason); return sema.sliceToIpString(block, src, val, reason); @@ -2180,7 +2180,7 @@ fn analyzeAsType( src: LazySrcLoc, air_inst: Air.Inst.Ref, ) !Type { - const wanted_type = Type.type; + const wanted_type: Type = .type; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, .{ .simple = .type }); return val.toType(); @@ -2641,7 +2641,7 @@ fn reparentOwnedErrorMsg( msg.msg = msg_str; } -const align_ty = Type.u29; +const align_ty: Type = .u29; pub fn analyzeAsAlign( sema: *Sema, @@ -2819,7 +2819,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const parent_ty = Type.fromInterned(zcu.namespacePtr(block.namespace).owner_type); + const parent_ty: Type = .fromInterned(zcu.namespacePtr(block.namespace).owner_type); const parent_captures: InternPool.CaptureValue.Slice = parent_ty.getCaptures(zcu); const captures = try sema.arena.alloc(InternPool.CaptureValue, captures_len); @@ -3777,7 +3777,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const alloc = try sema.resolveInst(inst_data.operand); const alloc_ty = sema.typeOf(alloc); const ptr_info = alloc_ty.ptrInfo(zcu); - const elem_ty = Type.fromInterned(ptr_info.child); + const elem_ty: Type = .fromInterned(ptr_info.child); // If the alloc was created in a comptime scope, we already created a comptime alloc for it. // However, if the final constructed value does not reference comptime-mutable memory, we wish @@ -3848,7 +3848,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc); const ptr_info = alloc_ty.ptrInfo(zcu); - const elem_ty = Type.fromInterned(ptr_info.child); + const elem_ty: Type = .fromInterned(ptr_info.child); const alloc_inst = alloc.toIndex() orelse return null; const comptime_info = sema.maybe_comptime_allocs.fetchRemove(alloc_inst) orelse return null; @@ -4024,9 +4024,9 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, // As this is a union field, we must store to the pointer now to set the tag. // If the payload is OPV, there will not be a payload store, so we store that value. // Otherwise, there will be a payload store to process later, so undef will suffice. 
- const payload_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]); + const payload_ty: Type = .fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]); const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty); - const tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), idx); + const tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), idx); const store_val = try pt.unionValue(maybe_union_ty, tag_val, payload_val); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty); } @@ -4050,7 +4050,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, const air_ptr_inst = store_inst.data.bin_op.lhs.toIndex().?; const store_val = (try sema.resolveValue(store_inst.data.bin_op.rhs)).?; const new_ptr = ptr_mapping.get(air_ptr_inst).?; - try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(new_ptr), store_val, Type.fromInterned(zcu.intern_pool.typeOf(store_val.toIntern()))); + try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(new_ptr), store_val, .fromInterned(zcu.intern_pool.typeOf(store_val.toIntern()))); }, else => unreachable, } @@ -4284,7 +4284,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com else => unreachable, }; if (zcu.intern_pool.isFuncBody(val)) { - const ty = Type.fromInterned(zcu.intern_pool.typeOf(val)); + const ty: Type = .fromInterned(zcu.intern_pool.typeOf(val)); if (try ty.fnHasRuntimeBitsSema(pt)) { try sema.addReferenceEntry(block, src, AnalUnit.wrap(.{ .func = val })); try zcu.ensureFuncBodyAnalysisQueued(val); @@ -4447,14 +4447,14 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const range_end = try sema.resolveInst(zir_arg_pair[1]); break :l try sema.analyzeArithmetic(block, .sub, range_end, range_start, arg_src, arg_src, arg_src, true); }; - const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src); + const arg_len = try sema.coerce(block, .usize, arg_len_uncoerced, arg_src); if (len == .none) { len = arg_len; len_idx = i; } if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| { if (len_val) |v| { - if (!(try sema.valuesEqual(arg_val, v, Type.usize))) { + if (!(try sema.valuesEqual(arg_val, v, .usize))) { const msg = msg: { const msg = try sema.errMsg(src, "non-matching for loop lengths", .{}); errdefer msg.destroy(gpa); @@ -5343,7 +5343,7 @@ fn zirValidatePtrArrayInit( // sentinel-terminated array, the sentinel will not have been populated by // any ZIR instructions at comptime; we need to do that here. 
if (array_ty.sentinel(zcu)) |sentinel_val| { - const array_len_ref = try pt.intRef(Type.usize, array_len); + const array_len_ref = try pt.intRef(.usize, array_len); const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true); const sentinel = Air.internedToRef(sentinel_val.toIntern()); try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store); @@ -5828,7 +5828,7 @@ fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins defer tracy.end(); const int = sema.code.instructions.items(.data)[@intFromEnum(inst)].int; - return sema.pt.intRef(Type.comptime_int, int); + return sema.pt.intRef(.comptime_int, int); } fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5846,7 +5846,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const limbs = try sema.arena.alloc(std.math.big.Limb, int.len); @memcpy(mem.sliceAsBytes(limbs), limb_bytes); - return Air.internedToRef((try sema.pt.intValue_big(Type.comptime_int, .{ + return Air.internedToRef((try sema.pt.intValue_big(.comptime_int, .{ .limbs = limbs, .positive = true, })).toIntern()); @@ -5856,7 +5856,7 @@ fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I _ = block; const number = sema.code.instructions.items(.data)[@intFromEnum(inst)].float; return Air.internedToRef((try sema.pt.floatValue( - Type.comptime_float, + .comptime_float, number, )).toIntern()); } @@ -5866,7 +5866,7 @@ fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); - return Air.internedToRef((try sema.pt.floatValue(Type.comptime_float, number)).toIntern()); + return Air.internedToRef((try sema.pt.floatValue(.comptime_float, number)).toIntern()); } fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -6641,7 +6641,7 @@ pub fn analyzeExport( }; const exported_nav = ip.getNav(exported_nav_index); - const export_ty = Type.fromInterned(exported_nav.typeOf(ip)); + const export_ty: Type = .fromInterned(exported_nav.typeOf(ip)); if (!try sema.validateExternType(export_ty, .other)) { return sema.failWithOwnedErrorMsg(block, msg: { @@ -7005,7 +7005,7 @@ fn lookupInNamespace( for (usingnamespaces.items) |sub_ns_nav| { try sema.ensureNavResolved(block, src, sub_ns_nav, .fully); - const sub_ns_ty = Type.fromInterned(ip.getNav(sub_ns_nav).status.fully_resolved.val); + const sub_ns_ty: Type = .fromInterned(ip.getNav(sub_ns_nav).status.fully_resolved.val); const sub_ns = zcu.namespacePtr(sub_ns_ty.getNamespaceIndex(zcu)); try checked_namespaces.put(gpa, sub_ns, {}); } @@ -7081,7 +7081,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref const gpa = sema.gpa; if (block.isComptime() or block.is_typeof) { - const index_val = try pt.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len); + const index_val = try pt.intValue_u64(.usize, sema.comptime_err_ret_trace.items.len); return Air.internedToRef(index_val.toIntern()); } @@ -7326,13 +7326,13 @@ fn checkCallArgumentCount( ) !Type { const pt = sema.pt; const zcu = pt.zcu; - const func_ty = func_ty: { + const func_ty: Type = func_ty: { switch (callee_ty.zigTypeTag(zcu)) { .@"fn" => break :func_ty callee_ty, .pointer => { const ptr_info = 
callee_ty.ptrInfo(zcu); if (ptr_info.flags.size == .one and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .@"fn") { - break :func_ty Type.fromInterned(ptr_info.child); + break :func_ty .fromInterned(ptr_info.child); } }, .optional => { @@ -7405,13 +7405,13 @@ fn callBuiltin( const pt = sema.pt; const zcu = pt.zcu; const callee_ty = sema.typeOf(builtin_fn); - const func_ty = func_ty: { + const func_ty: Type = func_ty: { switch (callee_ty.zigTypeTag(zcu)) { .@"fn" => break :func_ty callee_ty, .pointer => { const ptr_info = callee_ty.ptrInfo(zcu); if (ptr_info.flags.size == .one and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .@"fn") { - break :func_ty Type.fromInterned(ptr_info.child); + break :func_ty .fromInterned(ptr_info.child); } }, else => {}, @@ -7568,7 +7568,7 @@ const CallArgsInfo = union(enum) { } } // Give the arg its result type - const provide_param_ty = if (maybe_param_ty) |t| t else Type.generic_poison; + const provide_param_ty: Type = maybe_param_ty orelse .generic_poison; sema.inst_map.putAssumeCapacity(zir_call.call_inst, Air.internedToRef(provide_param_ty.toIntern())); // Resolve the arg! const uncoerced_arg = try sema.resolveInlineBody(block, arg_body, zir_call.call_inst); @@ -8353,7 +8353,7 @@ fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Typ @tagName(backend), @tagName(target.cpu.arch), }); } - const owner_func_ty = Type.fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty); + const owner_func_ty: Type = .fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty); if (owner_func_ty.toIntern() != func_ty.toIntern()) { return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{ func_ty.fmt(pt), owner_func_ty.fmt(pt), @@ -8452,7 +8452,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
const len_src = block.builtinCallArgSrc(inst_data.src_node, 0); const elem_type_src = block.builtinCallArgSrc(inst_data.src_node, 1); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const len: u32 = @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, .{ .simple = .vector_length })); + const len: u32 = @intCast(try sema.resolveInt(block, len_src, extra.lhs, .u32, .{ .simple = .vector_length })); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); const vector_type = try sema.pt.vectorType(.{ @@ -8470,7 +8470,7 @@ fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const len_src = block.src(.{ .node_offset_array_type_len = inst_data.src_node }); const elem_src = block.src(.{ .node_offset_array_type_elem = inst_data.src_node }); - const len = try sema.resolveInt(block, len_src, extra.lhs, Type.usize, .{ .simple = .array_length }); + const len = try sema.resolveInt(block, len_src, extra.lhs, .usize, .{ .simple = .array_length }); const elem_type = try sema.resolveType(block, elem_src, extra.rhs); try sema.validateArrayElemType(block, elem_type, elem_src); const array_ty = try sema.pt.arrayType(.{ @@ -8490,7 +8490,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil const len_src = block.src(.{ .node_offset_array_type_len = inst_data.src_node }); const sentinel_src = block.src(.{ .node_offset_array_type_sentinel = inst_data.src_node }); const elem_src = block.src(.{ .node_offset_array_type_elem = inst_data.src_node }); - const len = try sema.resolveInt(block, len_src, extra.len, Type.usize, .{ .simple = .array_length }); + const len = try sema.resolveInt(block, len_src, extra.len, .usize, .{ .simple = .array_length }); const elem_type = try sema.resolveType(block, elem_src, extra.elem_type); try sema.validateArrayElemType(block, elem_type, elem_src); const uncasted_sentinel = try sema.resolveInst(extra.sentinel); @@ -8599,7 +8599,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const src = block.nodeOffset(extra.node); const operand_src = block.builtinCallArgSrc(extra.node, 0); const uncasted_operand = try sema.resolveInst(extra.operand); - const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src); + const operand = try sema.coerce(block, .anyerror, uncasted_operand, operand_src); const err_int_ty = try pt.errorIntType(); if (try sema.resolveValue(operand)) |val| { @@ -8912,21 +8912,10 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { - if (zcu.backendSupportsFeature(.safety_checked_instructions)) { + if (zcu.backendSupportsFeature(.panic_fn)) { _ = try sema.preparePanicId(src, .invalid_enum_value); - return block.addTyOp(.intcast_safe, dest_ty, operand); - } else { - // Slightly silly fallback case... - const int_tag_ty = dest_ty.intTagType(zcu); - // Use `intCast`, since it'll set up the Sema-emitted safety checks for us! 
- const int_val = try sema.intCast(block, src, int_tag_ty, src, operand, src, true, true); - const result = try block.addBitCast(dest_ty, int_val); - if (!dest_ty.isNonexhaustiveEnum(zcu) and zcu.backendSupportsFeature(.is_named_enum_value)) { - const ok = try block.addUnOp(.is_named_enum_value, result); - try sema.addSafetyCheck(block, src, ok, .invalid_enum_value); - } - return result; } + return block.addTyOp(.intcast_safe, dest_ty, operand); } return block.addTyOp(.intcast, dest_ty, operand); } @@ -9309,7 +9298,7 @@ fn zirFunc( const ret_ty: Type = if (extra.data.ret_ty.is_generic) .generic_poison else switch (extra.data.ret_ty.body_len) { - 0 => Type.void, + 0 => .void, 1 => blk: { const ret_ty_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; @@ -9319,7 +9308,7 @@ fn zirFunc( const ret_ty_body = sema.code.bodySlice(extra_index, extra.data.ret_ty.body_len); extra_index += ret_ty_body.len; - const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, .{ .simple = .function_ret_ty }); + const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, .type, .{ .simple = .function_ret_ty }); break :blk ret_ty_val.toType(); }, }; @@ -9649,7 +9638,7 @@ fn funcCommon( var comptime_bits: u32 = 0; for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| { - const param_ty = Type.fromInterned(param_ty_ip); + const param_ty: Type = .fromInterned(param_ty_ip); const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @as(u1, @truncate(noalias_bits >> index)) != 0; @@ -9870,7 +9859,7 @@ fn finishFunc( const return_type: Type = if (opt_func_index == .none or ret_poison) bare_return_type else - Type.fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index))); + .fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index))); if (!return_type.isValidReturnType(zcu)) { const opaque_str = if (return_type.zigTypeTag(zcu) == .@"opaque") "opaque " else ""; @@ -10130,14 +10119,14 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (try sema.resolveValue(operand)) |operand_val| ct: { if (!is_vector) { if (operand_val.isUndef(zcu)) { - return Air.internedToRef((try pt.undefValue(Type.usize)).toIntern()); + return .undef_usize; } const addr = try operand_val.getUnsignedIntSema(pt) orelse { // Wasn't an integer pointer. This is a runtime operation. break :ct; }; return Air.internedToRef((try pt.intValue( - Type.usize, + .usize, addr, )).toIntern()); } @@ -10145,7 +10134,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! for (new_elems, 0..) |*new_elem, i| { const ptr_val = try operand_val.elemValue(pt, i); if (ptr_val.isUndef(zcu)) { - new_elem.* = (try pt.undefValue(Type.usize)).toIntern(); + new_elem.* = .undef_usize; continue; } const addr = try ptr_val.getUnsignedIntSema(pt) orelse { @@ -10153,7 +10142,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! break :ct; }; new_elem.* = (try pt.intValue( - Type.usize, + .usize, addr, )).toIntern(); } @@ -10165,16 +10154,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), ptr_src); try sema.validateRuntimeValue(block, ptr_src, operand); try sema.checkLogicalPtrOperation(block, ptr_src, ptr_ty); - if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) { - return block.addBitCast(dest_ty, operand); - } - const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); - for (new_elems, 0..) |*new_elem, i| { - const idx_ref = try pt.intRef(Type.usize, i); - const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); - new_elem.* = try block.addBitCast(.usize, old_elem); - } - return block.addAggregateInit(dest_ty, new_elems); + return block.addBitCast(dest_ty, operand); } fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -10283,7 +10263,7 @@ fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@intCast"); const operand = try sema.resolveInst(extra.rhs); - return sema.intCast(block, block.nodeOffset(inst_data.src_node), dest_ty, src, operand, operand_src, true, false); + return sema.intCast(block, block.nodeOffset(inst_data.src_node), dest_ty, src, operand, operand_src); } fn intCast( @@ -10294,8 +10274,6 @@ fn intCast( dest_ty_src: LazySrcLoc, operand: Air.Inst.Ref, operand_src: LazySrcLoc, - runtime_safety: bool, - safety_panics_are_enum: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; @@ -10314,7 +10292,7 @@ fn intCast( if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| { // requirement: intCast(u0, input) iff input == 0 - if (runtime_safety and block.wantSafety()) { + if (block.wantSafety()) { try sema.requireRuntimeBlock(block, src, operand_src); const wanted_info = dest_scalar_ty.intInfo(zcu); const wanted_bits = wanted_info.bits; @@ -10331,7 +10309,7 @@ fn intCast( const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; - try sema.addSafetyCheck(block, src, ok, if (safety_panics_are_enum) .invalid_enum_value else .cast_truncated_data); + try sema.addSafetyCheck(block, src, ok, .integer_out_of_bounds); } } @@ -10339,91 +10317,11 @@ fn intCast( } try sema.requireRuntimeBlock(block, src, operand_src); - if (runtime_safety and block.wantSafety()) { - if (zcu.backendSupportsFeature(.safety_checked_instructions)) { - _ = try sema.preparePanicId(src, .negative_to_unsigned); - _ = try sema.preparePanicId(src, .cast_truncated_data); - return block.addTyOp(.intcast_safe, dest_ty, operand); - } - const actual_info = operand_scalar_ty.intInfo(zcu); - const wanted_info = dest_scalar_ty.intInfo(zcu); - const actual_bits = actual_info.bits; - const wanted_bits = wanted_info.bits; - const actual_value_bits = actual_bits - @intFromBool(actual_info.signedness == .signed); - const wanted_value_bits = wanted_bits - @intFromBool(wanted_info.signedness == .signed); - - // range shrinkage - // requirement: int value fits into target type - if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(pt, operand_scalar_ty); - const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); - const dest_max = Air.internedToRef(dest_max_val.toIntern()); - - if (actual_info.signedness == .signed) { - const diff = try block.addBinOp(.sub_wrap, dest_max, operand); - - // Reinterpret the sign-bit as part of the value. This will make - // negative differences (`operand` > `dest_max`) appear too big. 
- const unsigned_scalar_operand_ty = try pt.intType(.unsigned, actual_bits); - const unsigned_operand_ty = if (is_vector) try pt.vectorType(.{ - .len = dest_ty.vectorLen(zcu), - .child = unsigned_scalar_operand_ty.toIntern(), - }) else unsigned_scalar_operand_ty; - const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff); - - // If the destination type is signed, then we need to double its - // range to account for negative values. - const dest_range_val = if (wanted_info.signedness == .signed) range_val: { - const one_scalar = try pt.intValue(unsigned_scalar_operand_ty, 1); - const one = if (is_vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{ - .ty = unsigned_operand_ty.toIntern(), - .storage = .{ .repeated_elem = one_scalar.toIntern() }, - } })) else one_scalar; - const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, pt); - const result = try arith.addWithOverflow(sema, unsigned_operand_ty, range_minus_one, one); - assert(result.overflow_bit.compareAllWithZero(.eq, zcu)); - break :range_val result.wrapped_result; - } else try pt.getCoerced(dest_max_val, unsigned_operand_ty); - const dest_range = Air.internedToRef(dest_range_val.toIntern()); - - const ok = if (is_vector) ok: { - const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte); - const all_in_range = try block.addReduce(is_in_range, .And); - break :ok all_in_range; - } else ok: { - const is_in_range = try block.addBinOp(.cmp_lte, diff_unsigned, dest_range); - break :ok is_in_range; - }; - // TODO negative_to_unsigned? - try sema.addSafetyCheck(block, src, ok, if (safety_panics_are_enum) .invalid_enum_value else .cast_truncated_data); - } else { - const ok = if (is_vector) ok: { - const is_in_range = try block.addCmpVector(operand, dest_max, .lte); - const all_in_range = try block.addReduce(is_in_range, .And); - break :ok all_in_range; - } else ok: { - const is_in_range = try block.addBinOp(.cmp_lte, operand, dest_max); - break :ok is_in_range; - }; - try sema.addSafetyCheck(block, src, ok, if (safety_panics_are_enum) .invalid_enum_value else .cast_truncated_data); - } - } else if (actual_info.signedness == .signed and wanted_info.signedness == .unsigned) { - // no shrinkage, yes sign loss - // requirement: signed to unsigned >= 0 - const ok = if (is_vector) ok: { - const scalar_zero = try pt.intValue(operand_scalar_ty, 0); - const zero_val = try sema.splat(operand_ty, scalar_zero); - const zero_inst = Air.internedToRef(zero_val.toIntern()); - const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); - const all_in_range = try block.addReduce(is_in_range, .And); - break :ok all_in_range; - } else ok: { - const zero_inst = Air.internedToRef((try pt.intValue(operand_ty, 0)).toIntern()); - const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst); - break :ok is_in_range; - }; - try sema.addSafetyCheck(block, src, ok, if (safety_panics_are_enum) .invalid_enum_value else .negative_to_unsigned); + if (block.wantSafety()) { + if (zcu.backendSupportsFeature(.panic_fn)) { + _ = try sema.preparePanicId(src, .integer_out_of_bounds); } + return block.addTyOp(.intcast_safe, dest_ty, operand); } return block.addTyOp(.intcast, dest_ty, operand); } @@ -10640,17 +10538,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (dst_bits >= src_bits) { return sema.coerce(block, dest_ty, operand, operand_src); } - if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) { - return block.addTyOp(.fptrunc, 
dest_ty, operand); - } - const vec_len = operand_ty.vectorLen(zcu); - const new_elems = try sema.arena.alloc(Air.Inst.Ref, vec_len); - for (new_elems, 0..) |*new_elem, i| { - const idx_ref = try pt.intRef(Type.usize, i); - const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); - new_elem.* = try block.addTyOp(.fptrunc, dest_scalar_ty, old_elem); - } - return block.addAggregateInit(dest_ty, new_elems); + return block.addTyOp(.fptrunc, dest_ty, operand); } fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -10675,7 +10563,7 @@ fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array = try sema.resolveInst(extra.lhs); const uncoerced_elem_index = try sema.resolveInst(extra.rhs); - const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src); + const elem_index = try sema.coerce(block, .usize, uncoerced_elem_index, elem_index_src); return sema.elemVal(block, src, array, elem_index, elem_index_src, true); } @@ -10685,7 +10573,7 @@ fn zirElemValImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].elem_val_imm; const array = try sema.resolveInst(inst_data.operand); - const elem_index = try sema.pt.intRef(Type.usize, inst_data.idx); + const elem_index = try sema.pt.intRef(.usize, inst_data.idx); return sema.elemVal(block, LazySrcLoc.unneeded, array, elem_index, LazySrcLoc.unneeded, false); } @@ -10728,7 +10616,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const uncoerced_elem_index = try sema.resolveInst(extra.rhs); - const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src); + const elem_index = try sema.coerce(block, .usize, uncoerced_elem_index, elem_index_src); return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true); } @@ -10742,7 +10630,7 @@ fn zirArrayInitElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.ptr); - const elem_index = try pt.intRef(Type.usize, extra.index); + const elem_index = try pt.intRef(.usize, extra.index); const array_ty = sema.typeOf(array_ptr).childType(zcu); switch (array_ty.zigTypeTag(zcu)) { .array, .vector => {}, @@ -11104,7 +10992,7 @@ const SwitchProngAnalysis = struct { if (operand_ty.zigTypeTag(zcu) == .@"union") { const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, zcu).?); const union_obj = zcu.typeToUnion(operand_ty).?; - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]); if (capture_byref) { const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), @@ -11154,7 +11042,7 @@ const SwitchProngAnalysis = struct { const first_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable; const first_field_index: u32 = zcu.unionTagFieldIndex(union_obj, first_item_val).?; - const first_field_ty = 
Type.fromInterned(union_obj.field_types.get(ip)[first_field_index]); + const first_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_field_index]); const field_indices = try sema.arena.alloc(u32, case_vals.len); for (case_vals, field_indices) |item, *field_idx| { @@ -11165,7 +11053,7 @@ const SwitchProngAnalysis = struct { // Fast path: if all the operands are the same type already, we don't need to hit // PTR! This will also allow us to emit simpler code. const same_types = for (field_indices[1..]) |field_idx| { - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]); if (!field_ty.eql(first_field_ty, zcu)) break false; } else true; @@ -11173,7 +11061,7 @@ const SwitchProngAnalysis = struct { // We need values to run PTR on, so make a bunch of undef constants. const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len); for (dummy_captures, field_indices) |*dummy, field_idx| { - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]); dummy.* = try pt.undefRef(field_ty); } @@ -11208,7 +11096,7 @@ const SwitchProngAnalysis = struct { // We need values to run PTR on, so make a bunch of undef constants. const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len); for (field_indices, dummy_captures) |field_idx, *dummy| { - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]); const field_ptr_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ @@ -11271,7 +11159,7 @@ const SwitchProngAnalysis = struct { // If we can, try to avoid that using in-memory coercions. const first_non_imc = in_mem: { for (field_indices, 0..) |field_idx, i| { - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]); if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) { break :in_mem i; } @@ -11294,7 +11182,7 @@ const SwitchProngAnalysis = struct { { const next = first_non_imc + 1; for (field_indices[next..], next..) 
|field_idx, i| { - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]); if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) { in_mem_coercible.unset(i); } @@ -11341,7 +11229,7 @@ const SwitchProngAnalysis = struct { }; const field_idx = field_indices[idx]; - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]); const uncoerced = try coerce_block.addStructFieldVal(operand_val, field_idx, field_ty); const coerced = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src); _ = try coerce_block.addBr(capture_block_inst, coerced); @@ -11365,7 +11253,7 @@ const SwitchProngAnalysis = struct { const first_imc_item_idx = in_mem_coercible.findFirstSet().?; const first_imc_field_idx = field_indices[first_imc_item_idx]; - const first_imc_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]); + const first_imc_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]); const uncoerced = try coerce_block.addStructFieldVal(operand_val, first_imc_field_idx, first_imc_field_ty); const coerced = try coerce_block.addBitCast(capture_ty, uncoerced); _ = try coerce_block.addBr(capture_block_inst, coerced); @@ -13165,7 +13053,7 @@ fn analyzeSwitchRuntimeBlock( for (seen_enum_fields, 0..) |seen_field, index| { if (seen_field != null) continue; const union_obj = zcu.typeToUnion(maybe_union_ty).?; - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[index]); if (field_ty.zigTypeTag(zcu) != .noreturn) break true; } else false else @@ -13490,7 +13378,7 @@ const RangeSetUnhandledIterator = struct { inline .u64, .i64 => |val_int| { const next_int = @addWithOverflow(val_int, 1); if (next_int[1] == 0) - return (try it.pt.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern(); + return (try it.pt.intValue(.fromInterned(int.ty), next_int[0])).toIntern(); }, .big_int => {}, .lazy_align, .lazy_size => unreachable, @@ -13506,7 +13394,7 @@ const RangeSetUnhandledIterator = struct { ); result_bigint.addScalar(val_bigint, 1); - return (try it.pt.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern(); + return (try it.pt.intValue_big(.fromInterned(int.ty), result_bigint.toConst())).toIntern(); } fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index { @@ -13636,7 +13524,7 @@ fn validateErrSetSwitch( .{}, ); } - return Type.anyerror; + return .anyerror; }, else => |err_set_ty_index| else_validation: { const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names; @@ -13839,7 +13727,7 @@ fn validateSwitchItemBool( item_ref: Zir.Inst.Ref, item_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const item = try sema.resolveSwitchItemVal(block, item_ref, Type.bool, item_src); + const item = try sema.resolveSwitchItemVal(block, item_ref, .bool, item_src); if (Value.fromInterned(item.val).toBool()) { true_count.* += 1; } else { @@ -14224,7 +14112,7 @@ fn zirShl( return lhs; } if (air_tag != .shl_sat and scalar_ty.zigTypeTag(zcu) != .comptime_int) { - const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits); + const bit_value = try pt.intValue(.comptime_int, scalar_ty.intInfo(zcu).bits); if (rhs_ty.zigTypeTag(zcu) == .vector) 
{ var i: usize = 0; while (i < rhs_ty.vectorLen(zcu)) : (i += 1) { @@ -14335,7 +14223,7 @@ fn zirShl( } if (air_tag == .shl_exact) { - const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(lhs_ty); + const op_ov_tuple_ty = try pt.overflowArithmeticTupleType(lhs_ty); const op_ov = try block.addInst(.{ .tag = .shl_with_overflow, .data = .{ .ty_pl = .{ @@ -14351,8 +14239,7 @@ fn zirShl( try block.addReduce(ov_bit, .Or) else ov_bit; - const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern()); - const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); + const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, .zero_u1); try sema.addSafetyCheck(block, src, no_ov, .shl_overflow); return sema.tupleFieldValByIndex(block, op_ov, 0, op_ov_tuple_ty); @@ -14406,7 +14293,7 @@ fn zirShr( return lhs; } if (scalar_ty.zigTypeTag(zcu) != .comptime_int) { - const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits); + const bit_value = try pt.intValue(.comptime_int, scalar_ty.intInfo(zcu).bits); if (rhs_ty.zigTypeTag(zcu) == .vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(zcu)) : (i += 1) { @@ -14689,7 +14576,7 @@ fn analyzeTupleCat( try sema.tupleFieldValByIndex(block, rhs, i, rhs_ty); } - return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs); + return block.addAggregateInit(.fromInterned(tuple_ty), element_refs); } fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -14716,7 +14603,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: { - if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined); + if (lhs_is_tuple) break :lhs_info undefined; return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)}); }; const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse { @@ -14892,7 +14779,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // lhs_dest_slice = dest[0..lhs.len] const slice_ty_ref = Air.internedToRef(slice_ty.toIntern()); - const lhs_len_ref = try pt.intRef(Type.usize, lhs_len); + const lhs_len_ref = try pt.intRef(.usize, lhs_len); const lhs_dest_slice = try block.addInst(.{ .tag = .slice, .data = .{ .ty_pl = .{ @@ -14907,7 +14794,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai _ = try block.addBinOp(.memcpy, lhs_dest_slice, lhs); // rhs_dest_slice = dest[lhs.len..][0..rhs.len] - const rhs_len_ref = try pt.intRef(Type.usize, rhs_len); + const rhs_len_ref = try pt.intRef(.usize, rhs_len); const rhs_dest_offset = try block.addInst(.{ .tag = .ptr_add, .data = .{ .ty_pl = .{ @@ -14932,7 +14819,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai _ = try block.addBinOp(.memcpy, rhs_dest_slice, rhs); if (res_sent_val) |sent_val| { - const elem_index = try pt.intRef(Type.usize, result_len); + const elem_index = try pt.intRef(.usize, result_len); const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty); const init = Air.internedToRef((try pt.getCoerced(sent_val, lhs_info.elem_type)).toIntern()); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); @@ -14943,7 +14830,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var elem_i: u32 = 0; while (elem_i < lhs_len) : 
(elem_i += 1) { - const elem_index = try pt.intRef(Type.usize, elem_i); + const elem_index = try pt.intRef(.usize, elem_i); const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = inst_data.src_node, @@ -14954,8 +14841,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; - const elem_index = try pt.intRef(Type.usize, elem_i); - const rhs_index = try pt.intRef(Type.usize, rhs_elem_i); + const elem_index = try pt.intRef(.usize, elem_i); + const rhs_index = try pt.intRef(.usize, rhs_elem_i); const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty); const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = inst_data.src_node, @@ -14965,7 +14852,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.storePtr2(block, src, elem_ptr, src, init, operand_src, .store); } if (res_sent_val) |sent_val| { - const elem_index = try pt.intRef(Type.usize, result_len); + const elem_index = try pt.intRef(.usize, result_len); const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty); const init = Air.internedToRef((try pt.getCoerced(sent_val, lhs_info.elem_type)).toIntern()); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); @@ -14978,7 +14865,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai { var elem_i: u32 = 0; while (elem_i < lhs_len) : (elem_i += 1) { - const index = try pt.intRef(Type.usize, elem_i); + const index = try pt.intRef(.usize, elem_i); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = inst_data.src_node, .elem_index = elem_i, @@ -14988,7 +14875,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; - const index = try pt.intRef(Type.usize, rhs_elem_i); + const index = try pt.intRef(.usize, rhs_elem_i); const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = inst_data.src_node, .elem_index = @intCast(rhs_elem_i), @@ -15012,8 +14899,8 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins switch (ptr_info.flags.size) { .slice => { const val = try sema.resolveConstDefinedValue(block, src, operand, .{ .simple = .slice_cat_operand }); - return Type.ArrayInfo{ - .elem_type = Type.fromInterned(ptr_info.child), + return .{ + .elem_type = .fromInterned(ptr_info.child), .sentinel = switch (ptr_info.sentinel) { .none => null, else => Value.fromInterned(ptr_info.sentinel), @@ -15113,7 +15000,7 @@ fn analyzeTupleMul( @memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]); } - return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs); + return block.addAggregateInit(.fromInterned(tuple_ty), element_refs); } fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -15166,7 +15053,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_ty.isTuple(zcu)) { // In `**` rhs must be comptime-known, but lhs can be runtime-known - const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{ .simple = .array_mul_factor }); + const factor = try sema.resolveInt(block, rhs_src, extra.rhs, .usize, .{ .simple = .array_mul_factor }); const 
factor_casted = try sema.usizeCast(block, rhs_src, factor); return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted); } @@ -15188,7 +15075,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; // In `**` rhs must be comptime-known, but lhs can be runtime-known - const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{ .simple = .array_mul_factor }); + const factor = try sema.resolveInt(block, rhs_src, extra.rhs, .usize, .{ .simple = .array_mul_factor }); const result_len_u64 = std.math.mul(u64, lhs_info.len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); @@ -15246,7 +15133,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // to get the same elem values. const lhs_vals = try sema.arena.alloc(Air.Inst.Ref, lhs_len); for (lhs_vals, 0..) |*lhs_val, idx| { - const idx_ref = try pt.intRef(Type.usize, idx); + const idx_ref = try pt.intRef(.usize, idx); lhs_val.* = try sema.elemVal(block, lhs_src, lhs, idx_ref, src, false); } @@ -15267,14 +15154,14 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var elem_i: usize = 0; while (elem_i < result_len) { for (lhs_vals) |lhs_val| { - const elem_index = try pt.intRef(Type.usize, elem_i); + const elem_index = try pt.intRef(.usize, elem_i); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); try sema.storePtr2(block, src, elem_ptr, src, lhs_val, lhs_src, .store); elem_i += 1; } } if (lhs_info.sentinel) |sent_val| { - const elem_index = try pt.intRef(Type.usize, result_len); + const elem_index = try pt.intRef(.usize, result_len); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); const init = Air.internedToRef(sent_val.toIntern()); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); @@ -16131,15 +16018,14 @@ fn zirOverflowArithmetic( const maybe_lhs_val = try sema.resolveValue(lhs); const maybe_rhs_val = try sema.resolveValue(rhs); - const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty); - const overflow_ty = Type.fromInterned(ip.indexToKey(tuple_ty.toIntern()).tuple_type.types.get(ip)[1]); + const tuple_ty = try pt.overflowArithmeticTupleType(dest_ty); + const overflow_ty: Type = .fromInterned(ip.indexToKey(tuple_ty.toIntern()).tuple_type.types.get(ip)[1]); var result: struct { inst: Air.Inst.Ref = .none, wrapped: Value = Value.@"unreachable", overflow_bit: Value, } = result: { - const zero_bit = try pt.intValue(Type.u1, 0); switch (zir_tag) { .add_with_overflow => { // If either of the arguments is zero, `false` is returned and the other is stored // Otherwise, if either of the arguments is undefined, undefined is returned. 
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { - break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(zcu) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) { - break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -16173,7 +16059,7 @@ fn zirOverflowArithmetic( if (rhs_val.isUndef(zcu)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { - break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(zcu)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; @@ -16192,9 +16078,9 @@ fn zirOverflowArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(zcu)) { if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { - break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs }; } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { - break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs }; } } } @@ -16202,9 +16088,9 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(zcu)) { if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { - break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs }; } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { - break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs }; } } } @@ -16226,12 +16112,12 @@ fn zirOverflowArithmetic( // Otherwise, if either of the arguments is undefined, both results are undefined. 
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { - break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(zcu) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) { - break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -16305,24 +16191,6 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value { return Value.fromInterned(repeated); } -fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { - const pt = sema.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const ov_ty = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{ - .len = ty.vectorLen(zcu), - .child = .u1_type, - }) else Type.u1; - - const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() }; - const values = [2]InternPool.Index{ .none, .none }; - const tuple_ty = try ip.getTupleType(zcu.gpa, pt.tid, .{ - .types = &types, - .values = &values, - }); - return Type.fromInterned(tuple_ty); -} - fn analyzeArithmetic( sema: *Sema, block: *Block, @@ -16380,7 +16248,7 @@ fn analyzeArithmetic( const address = std.math.sub(u64, lhs_ptr.byte_offset, rhs_ptr.byte_offset) catch return sema.fail(block, src, "operation results in overflow", .{}); const result = address / elem_size; - return try pt.intRef(Type.usize, result); + return try pt.intRef(.usize, result); } else { break :runtime_src lhs_src; } @@ -16395,7 +16263,7 @@ fn analyzeArithmetic( const lhs_int = try block.addBitCast(.usize, lhs); const rhs_int = try block.addBitCast(.usize, rhs); const address = try block.addBinOp(.sub_wrap, lhs_int, rhs_int); - return try block.addBinOp(.div_exact, address, try pt.intRef(Type.usize, elem_size)); + return try block.addBinOp(.div_exact, address, try pt.intRef(.usize, elem_size)); } } else { switch (lhs_ty.ptrSize(zcu)) { @@ -16498,42 +16366,10 @@ fn analyzeArithmetic( } if (block.wantSafety() and want_safety and scalar_tag == .int) { - if (zcu.backendSupportsFeature(.safety_checked_instructions)) { - if (air_tag != air_tag_safe) { - _ = try sema.preparePanicId(src, .integer_overflow); - } - return block.addBinOp(air_tag_safe, casted_lhs, casted_rhs); - } else { - const maybe_op_ov: ?Air.Inst.Tag = switch (air_tag) { - .add => .add_with_overflow, - .sub => .sub_with_overflow, - .mul => .mul_with_overflow, - else => null, - }; - if (maybe_op_ov) |op_ov_tag| { - const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(resolved_type); - const op_ov = try block.addInst(.{ - .tag = op_ov_tag, - .data = .{ .ty_pl = .{ - .ty = Air.internedToRef(op_ov_tuple_ty.toIntern()), - .payload = try sema.addExtra(Air.Bin{ - .lhs = casted_lhs, - .rhs = casted_rhs, - }), - } }, - }); - const ov_bit = try sema.tupleFieldValByIndex(block, op_ov, 1, op_ov_tuple_ty); - const any_ov_bit = if (resolved_type.zigTypeTag(zcu) == .vector) - try block.addReduce(ov_bit, .Or) - else - ov_bit; - const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern()); - const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); - - try sema.addSafetyCheck(block, src, no_ov, .integer_overflow); - return sema.tupleFieldValByIndex(block, op_ov, 0, op_ov_tuple_ty); - } + if (air_tag != air_tag_safe and zcu.backendSupportsFeature(.panic_fn)) { + _ = try 
sema.preparePanicId(src, .integer_overflow); } + return block.addBinOp(air_tag_safe, casted_lhs, casted_rhs); } return block.addBinOp(air_tag, casted_lhs, casted_rhs); } @@ -16550,7 +16386,7 @@ fn analyzePtrArithmetic( ) CompileError!Air.Inst.Ref { // TODO if the operand is comptime-known to be negative, or is a negative int, // coerce to isize instead of usize. - const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src); + const offset = try sema.coerce(block, .usize, uncasted_offset, offset_src); const pt = sema.pt; const zcu = pt.zcu; const opt_ptr_val = try sema.resolveValue(ptr); @@ -16736,8 +16572,8 @@ fn zirAsm( const uncasted_arg = try sema.resolveInst(input.data.operand); const uncasted_arg_ty = sema.typeOf(uncasted_arg); switch (uncasted_arg_ty.zigTypeTag(zcu)) { - .comptime_int => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src), - .comptime_float => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src), + .comptime_int => arg.* = try sema.coerce(block, .usize, uncasted_arg, src), + .comptime_float => arg.* = try sema.coerce(block, .f64, uncasted_arg, src), else => { arg.* = uncasted_arg; }, @@ -16860,9 +16696,7 @@ fn zirCmpEq( const runtime_src: LazySrcLoc = src: { if (try sema.resolveValue(lhs)) |lval| { if (try sema.resolveValue(rhs)) |rval| { - if (lval.isUndef(zcu) or rval.isUndef(zcu)) { - return pt.undefRef(Type.bool); - } + if (lval.isUndef(zcu) or rval.isUndef(zcu)) return .undef_bool; const lkey = zcu.intern_pool.indexToKey(lval.toIntern()); const rkey = zcu.intern_pool.indexToKey(rval.toIntern()); return if ((lkey.err.name == rkey.err.name) == (op == .eq)) @@ -16916,7 +16750,7 @@ fn analyzeCmpUnionTag( const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); if (try sema.resolveValue(coerced_tag)) |enum_val| { - if (enum_val.isUndef(zcu)) return pt.undefRef(Type.bool); + if (enum_val.isUndef(zcu)) return .undef_bool; const field_ty = union_ty.unionFieldType(enum_val, zcu).?; if (field_ty.zigTypeTag(zcu) == .noreturn) { return .bool_false; @@ -17027,8 +16861,8 @@ fn cmpSelf( const maybe_lhs_val = try sema.resolveValue(casted_lhs); const maybe_rhs_val = try sema.resolveValue(casted_rhs); - if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool); - if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool); + if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return .undef_bool; + if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return .undef_bool; const runtime_src: LazySrcLoc = src: { if (maybe_lhs_val) |lhs_val| { @@ -17083,7 +16917,7 @@ fn runtimeBoolCmp( ) CompileError!Air.Inst.Ref { if ((op == .neq) == rhs) { try sema.requireRuntimeBlock(block, src, runtime_src); - return block.addTyOp(.not, Type.bool, lhs); + return block.addTyOp(.not, .bool, lhs); } else { return lhs; } @@ -17107,7 +16941,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
.comptime_float, .comptime_int, .void, - => return pt.intRef(Type.comptime_int, 0), + => return .zero, .bool, .int, @@ -17148,7 +16982,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .comptime_float, .comptime_int, .void, - => return pt.intRef(Type.comptime_int, 0), + => return .zero, .bool, .int, @@ -17167,7 +17001,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A => {}, } const bit_size = try operand_ty.bitSizeSema(pt); - return pt.intRef(Type.comptime_int, bit_size); + return pt.intRef(.comptime_int, bit_size); } fn zirThis( @@ -17285,7 +17119,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat assert(block.is_typeof); // We need a dummy runtime instruction with the correct type. - return block.addTy(.alloc, Type.fromInterned(capture_ty)); + return block.addTy(.alloc, .fromInterned(capture_ty)); } fn zirRetAddr( @@ -17293,10 +17127,11 @@ fn zirRetAddr( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + _ = sema; _ = extended; if (block.isComptime()) { // TODO: we could give a meaningful lazy value here. #14938 - return sema.pt.intRef(Type.usize, 0); + return .zero_usize; } else { return block.addNoOp(.ret_addr); } @@ -17349,7 +17184,7 @@ fn zirBuiltinSrc( } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, func_name_len)).toIntern(), + .len = (try pt.intValue(.usize, func_name_len)).toIntern(), } }); }; @@ -17375,7 +17210,7 @@ fn zirBuiltinSrc( } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, module_name.len)).toIntern(), + .len = (try pt.intValue(.usize, module_name.len)).toIntern(), } }); }; @@ -17401,7 +17236,7 @@ fn zirBuiltinSrc( } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, file_name.len)).toIntern(), + .len = (try pt.intValue(.usize, file_name.len)).toIntern(), } }); }; @@ -17414,9 +17249,9 @@ fn zirBuiltinSrc( // fn_name: [:0]const u8, func_name_val, // line: u32, - (try pt.intValue(Type.u32, extra.line + 1)).toIntern(), + (try pt.intValue(.u32, extra.line + 1)).toIntern(), // column: u32, - (try pt.intValue(Type.u32, extra.column + 1)).toIntern(), + (try pt.intValue(.u32, extra.column + 1)).toIntern(), }; return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = src_loc_ty.toIntern(), @@ -17511,7 +17346,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, param_vals.len)).toIntern(), + .len = (try pt.intValue(.usize, param_vals.len)).toIntern(), } }); }; @@ -17564,7 +17399,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // signedness: Signedness, (try pt.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(), // bits: u16, - (try pt.intValue(Type.u16, info.bits)).toIntern(), + (try pt.intValue(.u16, info.bits)).toIntern(), }; return Air.internedToRef((try pt.internUnion(.{ .ty = type_info_ty.toIntern(), @@ -17580,7 +17415,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_vals = .{ // bits: u16, - (try pt.intValue(Type.u16, ty.bitSize(zcu))).toIntern(), + (try pt.intValue(.u16, ty.bitSize(zcu))).toIntern(), }; return Air.internedToRef((try pt.internUnion(.{ .ty = type_info_ty.toIntern(), @@ -17594,7 +17429,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .pointer => { const info = ty.ptrInfo(zcu); const alignment = if 
(info.flags.alignment.toByteUnits()) |alignment| - try pt.intValue(Type.comptime_int, alignment) + try pt.intValue(.comptime_int, alignment) else try Type.fromInterned(info.child).lazyAbiAlignment(pt); @@ -17638,7 +17473,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = ty.arrayInfo(zcu); const field_values = .{ // len: comptime_int, - (try pt.intValue(Type.comptime_int, info.len)).toIntern(), + (try pt.intValue(.comptime_int, info.len)).toIntern(), // child: type, info.elem_type.toIntern(), // sentinel: ?*const anyopaque, @@ -17659,7 +17494,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = ty.arrayInfo(zcu); const field_values = .{ // len: comptime_int, - (try pt.intValue(Type.comptime_int, info.len)).toIntern(), + (try pt.intValue(.comptime_int, info.len)).toIntern(), // child: type, info.elem_type.toIntern(), }; @@ -17723,7 +17558,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, error_name_len)).toIntern(), + .len = (try pt.intValue(.usize, error_name_len)).toIntern(), } }); }; @@ -17770,7 +17605,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, vals.len)).toIntern(), + .len = (try pt.intValue(.usize, vals.len)).toIntern(), } }); } else .none; const errors_val = try pt.intern(.{ .opt = .{ @@ -17819,7 +17654,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .comptime_int_type, ) else - (try pt.intValue(Type.comptime_int, tag_index)).toIntern(); + (try pt.intValue(.comptime_int, tag_index)).toIntern(); // TODO: write something like getCoercedInts to avoid needing to dupe const name_val = v: { @@ -17844,7 +17679,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, tag_name_len)).toIntern(), + .len = (try pt.intValue(.usize, tag_name_len)).toIntern(), } }); }; @@ -17887,7 +17722,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, enum_field_vals.len)).toIntern(), + .len = (try pt.intValue(.usize, enum_field_vals.len)).toIntern(), } }); }; @@ -17949,7 +17784,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(), + .len = (try pt.intValue(.usize, field_name_len)).toIntern(), } }); }; @@ -17965,7 +17800,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // type: type, field_ty, // alignment: comptime_int, - (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), + (try pt.intValue(.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), }; field_val.* = try pt.intern(.{ .aggregate = .{ .ty = union_field_ty.toIntern(), @@ -18000,7 +17835,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, union_field_vals.len)).toIntern(), + .len = (try pt.intValue(.usize, union_field_vals.len)).toIntern(), } }); }; @@ -18070,7 +17905,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, 
field_name_len)).toIntern(), + .len = (try pt.intValue(.usize, field_name_len)).toIntern(), } }); }; @@ -18089,7 +17924,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_comptime: bool, Value.makeBool(is_comptime).toIntern(), // alignment: comptime_int, - (try pt.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(zcu).toByteUnits() orelse 0)).toIntern(), + (try pt.intValue(.comptime_int, Type.fromInterned(field_ty).abiAlignment(zcu).toByteUnits() orelse 0)).toIntern(), }; struct_field_val.* = try pt.intern(.{ .aggregate = .{ .ty = struct_field_ty.toIntern(), @@ -18111,7 +17946,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]); const field_init = struct_type.fieldInit(ip, field_index); const field_is_comptime = struct_type.fieldIsComptime(ip, field_index); const name_val = v: { @@ -18134,7 +17969,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(), + .len = (try pt.intValue(.usize, field_name_len)).toIntern(), } }); }; @@ -18159,7 +17994,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_comptime: bool, Value.makeBool(field_is_comptime).toIntern(), // alignment: comptime_int, - (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), + (try pt.intValue(.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), }; field_val.* = try pt.intern(.{ .aggregate = .{ .ty = struct_field_ty.toIntern(), @@ -18195,7 +18030,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, struct_field_vals.len)).toIntern(), + .len = (try pt.intValue(.usize, struct_field_vals.len)).toIntern(), } }); }; @@ -18304,7 +18139,7 @@ fn typeInfoDecls( } }, .byte_offset = 0, } }), - .len = (try pt.intValue(Type.usize, decl_vals.items.len)).toIntern(), + .len = (try pt.intValue(.usize, decl_vals.items.len)).toIntern(), } }); } @@ -18354,7 +18189,7 @@ fn typeInfoNamespaceDecls( .byte_offset = 0, }, }), - .len = (try pt.intValue(Type.usize, name_len)).toIntern(), + .len = (try pt.intValue(.usize, name_len)).toIntern(), }, }); }; @@ -18373,7 +18208,7 @@ fn typeInfoNamespaceDecls( continue; } try sema.ensureNavResolved(block, src, nav, .fully); - const namespace_ty = Type.fromInterned(ip.getNav(nav).status.fully_resolved.val); + const namespace_ty: Type = .fromInterned(ip.getNav(nav).status.fully_resolved.val); try sema.typeInfoNamespaceDecls(block, src, namespace_ty.getNamespaceIndex(zcu).toOptional(), declaration_ty, decl_vals, seen_namespaces); } } @@ -18424,7 +18259,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi const pt = sema.pt; const zcu = pt.zcu; switch (operand.zigTypeTag(zcu)) { - .comptime_int => return Type.comptime_int, + .comptime_int => return .comptime_int, .int => { const bits = operand.bitSize(zcu); const count = if (bits == 0) @@ -18512,14 +18347,12 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand_src = block.src(.{ .node_offset_un_op = 
inst_data.src_node }); const uncasted_operand = try sema.resolveInst(inst_data.operand); - const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src); + const operand = try sema.coerce(block, .bool, uncasted_operand, operand_src); if (try sema.resolveValue(operand)) |val| { - return if (val.isUndef(zcu)) - pt.undefRef(Type.bool) - else if (val.toBool()) .bool_false else .bool_true; + return if (val.isUndef(zcu)) .undef_bool else if (val.toBool()) .bool_false else .bool_true; } try sema.requireRuntimeBlock(block, src, null); - return block.addTyOp(.not, Type.bool, operand); + return block.addTyOp(.not, .bool, operand); } fn zirBoolBr( @@ -18544,7 +18377,7 @@ fn zirBoolBr( const lhs_src = parent_block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = parent_block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); - const lhs = try sema.coerce(parent_block, Type.bool, uncoerced_lhs, lhs_src); + const lhs = try sema.coerce(parent_block, .bool, uncoerced_lhs, lhs_src); if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { if (is_bool_or and lhs_val.toBool()) { @@ -18559,7 +18392,7 @@ fn zirBoolBr( if (sema.typeOf(rhs_result).isNoReturn(zcu)) { return rhs_result; } - return sema.coerce(parent_block, Type.bool, rhs_result, rhs_src); + return sema.coerce(parent_block, .bool, rhs_result, rhs_src); } const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); @@ -18596,7 +18429,7 @@ fn zirBoolBr( const rhs_result = try sema.resolveInlineBody(rhs_block, body, inst); const rhs_noret = sema.typeOf(rhs_result).isNoReturn(zcu); const coerced_rhs_result = if (!rhs_noret) rhs: { - const coerced_result = try sema.coerce(rhs_block, Type.bool, rhs_result, rhs_src); + const coerced_result = try sema.coerce(rhs_block, .bool, rhs_result, rhs_src); _ = try rhs_block.addBr(block_inst, coerced_result); break :rhs coerced_result; } else rhs_result; @@ -18797,7 +18630,7 @@ fn zirCondbr( const else_body = sema.code.bodySlice(extra.end + then_body.len, extra.data.else_body_len); const uncasted_cond = try sema.resolveInst(extra.data.condition); - const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src); + const cond = try sema.coerce(parent_block, .bool, uncasted_cond, cond_src); if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| { const body = if (cond_val.toBool()) then_body else else_body; @@ -19502,7 +19335,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const abi_align: Alignment = if (inst_data.flags.has_align) blk: { const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]); extra_i += 1; - const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src); + const coerced = try sema.coerce(block, .u32, try sema.resolveInst(ref), align_src); const val = try sema.resolveConstDefinedValue(block, align_src, coerced, .{ .simple = .@"align" }); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. 
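[Editor's note, not part of the patch] The mechanical change running through these hunks is the move from spelled-out constants such as `Type.usize`, `Type.u32`, and `Type.bool` to decl literals (`.usize`, `.u32`, `.bool`), which the compiler resolves against the namespace of the expected type. Below is a minimal sketch of the language feature using a hypothetical `Ty` struct; the real `Type` in src/Type.zig presumably declares these with quoted identifiers like `@"usize"`, since a plain `usize` decl would shadow a primitive name.

const std = @import("std");

// Hypothetical stand-in for the compiler's `Type`; only the decl-literal shape matters here.
const Ty = struct {
    tag: Tag,

    const Tag = enum { usize_t, u32_t };

    pub const usize_t: Ty = .{ .tag = .usize_t };
    pub const u32_t: Ty = .{ .tag = .u32_t };
};

fn resolveInt(ty: Ty) Ty {
    return ty;
}

test "decl literals resolve against the expected type" {
    // `.usize_t` is shorthand for `Ty.usize_t` because the parameter type is
    // known at the call site; this is why `sema.resolveInt(..., Type.usize, ...)`
    // can shrink to `sema.resolveInt(..., .usize, ...)` throughout the patch.
    const t = resolveInt(.usize_t);
    try std.testing.expect(t.tag == .usize_t);
}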
@@ -19526,14 +19359,14 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const bit_offset: u16 = if (inst_data.flags.has_bit_range) blk: { const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]); extra_i += 1; - const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, .{ .simple = .type }); + const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, .u16, .{ .simple = .type }); break :blk @intCast(bit_offset); } else 0; const host_size: u16 = if (inst_data.flags.has_bit_range) blk: { const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]); extra_i += 1; - const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, .{ .simple = .type }); + const host_size = try sema.resolveInt(block, hostsize_src, ref, .u16, .{ .simple = .type }); break :blk @intCast(host_size); } else 0; @@ -19767,7 +19600,7 @@ fn unionInit( const zcu = pt.zcu; const ip = &zcu.intern_pool; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src); - const field_ty = Type.fromInterned(zcu.typeToUnion(union_ty).?.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(zcu.typeToUnion(union_ty).?.field_types.get(ip)[field_index]); const init = try sema.coerce(block, field_ty, uncasted_init, init_src); _ = union_ty_src; return unionInitFromEnumTag(sema, block, init_src, union_ty, field_index, init); @@ -19902,7 +19735,7 @@ fn zirStructInit( const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = resolved_ty.unionTagTypeHypothetical(zcu); const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); - const field_ty = Type.fromInterned(zcu.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(zcu.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]); if (field_ty.zigTypeTag(zcu) == .noreturn) { return sema.failWithOwnedErrorMsg(block, msg: { @@ -19990,7 +19823,7 @@ fn finishStructInit( .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(i), } }); - const field_ty = Type.fromInterned(tuple.types.get(ip)[i]); + const field_ty: Type = .fromInterned(tuple.types.get(ip)[i]); field_inits[i] = try sema.coerce(block, field_ty, field_inits[i], field_src); continue; } @@ -20018,7 +19851,7 @@ fn finishStructInit( .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(i), } }); - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]); field_inits[i] = try sema.coerce(block, field_ty, field_inits[i], field_src); continue; } @@ -20183,7 +20016,7 @@ fn structInitAnon( const msg = try sema.errMsg(field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, Type.fromInterned(field_ty.*)); + try sema.addDeclaredHereNote(msg, .fromInterned(field_ty.*)); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); @@ -20317,7 +20150,7 @@ fn structInitAnon( element_refs[i] = try sema.resolveInst(item.data.init); } - return block.addAggregateInit(Type.fromInterned(struct_ty), element_refs); + return block.addAggregateInit(.fromInterned(struct_ty), element_refs); } fn zirArrayInit( @@ -20441,7 +20274,7 @@ fn zirArrayInit( }); const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern()); - const index = try pt.intRef(Type.usize, i); + const index = try 
pt.intRef(.usize, i); const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref); _ = try block.addBinOp(.store, elem_ptr, arg); } @@ -20455,7 +20288,7 @@ fn zirArrayInit( const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern()); for (resolved_args, 0..) |arg, i| { - const index = try pt.intRef(Type.usize, i); + const index = try pt.intRef(.usize, i); const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref); _ = try block.addBinOp(.store, elem_ptr, arg); } @@ -20504,7 +20337,7 @@ fn arrayInitAnon( const msg = try sema.errMsg(operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(gpa); - try sema.addDeclaredHereNote(msg, Type.fromInterned(types[i])); + try sema.addDeclaredHereNote(msg, .fromInterned(types[i])); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); @@ -20561,7 +20394,7 @@ fn arrayInitAnon( element_refs[i] = try sema.resolveInst(operand); } - return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs); + return block.addAggregateInit(.fromInterned(tuple_ty), element_refs); } fn addConstantMaybeRef(sema: *Sema, val: InternPool.Index, is_ref: bool) !Air.Inst.Ref { @@ -20632,7 +20465,7 @@ fn fieldType( .optional => { // Struct/array init through optional requires the child type to not be a pointer. // If the child of .optional is a pointer it'll error on the next loop. - cur_ty = Type.fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type); + cur_ty = .fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type); continue; }, .error_union => { @@ -20710,44 +20543,32 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const dest_ty: Type = if (is_vector) try pt.vectorType(.{ .child = .u1_type, .len = len }) else .u1; if (try sema.resolveValue(operand)) |val| { if (!is_vector) { - if (val.isUndef(zcu)) return pt.undefRef(Type.u1); - if (val.toBool()) return Air.internedToRef((try pt.intValue(Type.u1, 1)).toIntern()); - return Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern()); + return if (val.isUndef(zcu)) .undef_u1 else if (val.toBool()) .one_u1 else .zero_u1; } if (val.isUndef(zcu)) return pt.undefRef(dest_ty); const new_elems = try sema.arena.alloc(InternPool.Index, len); for (new_elems, 0..) |*new_elem, i| { const old_elem = try val.elemValue(pt, i); - const new_val = if (old_elem.isUndef(zcu)) - try pt.undefValue(Type.u1) + new_elem.* = if (old_elem.isUndef(zcu)) + .undef_u1 else if (old_elem.toBool()) - try pt.intValue(Type.u1, 1) + .one_u1 else - try pt.intValue(Type.u1, 0); - new_elem.* = new_val.toIntern(); + .zero_u1; } return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = new_elems }, } })); } - if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) { - return block.addBitCast(dest_ty, operand); - } - const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); - for (new_elems, 0..) 
|*new_elem, i| { - const idx_ref = try pt.intRef(Type.usize, i); - const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); - new_elem.* = try block.addBitCast(.u1, old_elem); - } - return block.addAggregateInit(dest_ty, new_elems); + return block.addBitCast(dest_ty, operand); } fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const uncoerced_operand = try sema.resolveInst(inst_data.operand); - const operand = try sema.coerce(block, Type.anyerror, uncoerced_operand, operand_src); + const operand = try sema.coerce(block, .anyerror, uncoerced_operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { const err_name = sema.pt.zcu.intern_pool.indexToKey(val.toIntern()).err.name; @@ -20993,12 +20814,12 @@ fn zirReify( .float => { const float = try sema.interpretBuiltinType(block, operand_src, .fromInterned(union_val.val), std.builtin.Type.Float); - const ty = switch (float.bits) { - 16 => Type.f16, - 32 => Type.f32, - 64 => Type.f64, - 80 => Type.f80, - 128 => Type.f128, + const ty: Type = switch (float.bits) { + 16 => .f16, + 32 => .f32, + 64 => .f64, + 80 => .f80, + 128 => .f128, else => return sema.fail(block, src, "{}-bit float unsupported", .{float.bits}), }; return Air.internedToRef(ty.toIntern()); @@ -21038,7 +20859,7 @@ fn zirReify( try ip.getOrPutString(gpa, pt.tid, "sentinel_ptr", .no_embedded_nulls), ).?); - if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { + if (!try sema.intFitsInType(alignment_val, .u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } @@ -21174,7 +20995,7 @@ fn zirReify( }, .error_set => { const payload_val = Value.fromInterned(union_val.val).optionalValue(zcu) orelse - return Air.internedToRef(Type.anyerror.toIntern()); + return .anyerror_type; const names_val = try sema.derefSliceAsArray(block, src, payload_val, .{ .simple = .error_set_contents }); @@ -21776,7 +21597,7 @@ fn reifyUnion( errdefer if (!has_explicit_tag) ip.remove(pt.tid, enum_tag_ty); // remove generated tag type on error for (field_types) |field_ty_ip| { - const field_ty = Type.fromInterned(field_ty_ip); + const field_ty: Type = .fromInterned(field_ty_ip); if (field_ty.zigTypeTag(zcu) == .@"opaque") { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); @@ -22060,7 +21881,7 @@ fn reifyStruct( } if (any_aligned_fields) { - if (!try sema.intFitsInType(field_alignment_val, Type.u32, null)) { + if (!try sema.intFitsInType(field_alignment_val, .u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } @@ -22149,7 +21970,7 @@ fn reifyStruct( if (layout == .@"packed") { var fields_bit_sum: u64 = 0; for (0..struct_type.field_types.len) |field_idx| { - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_idx]); field_ty.resolveLayout(pt) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; @@ -22325,7 +22146,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro if (block.wantSafety()) { const len = dest_ty.vectorLen(zcu); for (0..len) |i| { - const idx_ref = try pt.intRef(Type.usize, i); + const idx_ref = try 
pt.intRef(.usize, i); const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref); const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 0.0)).toIntern())); try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); @@ -22336,42 +22157,23 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro .storage = .{ .repeated_elem = (try pt.intValue(dest_scalar_ty, 0)).toIntern() }, } })); } - if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) { - const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand); - if (block.wantSafety()) { - const back = try block.addTyOp(.float_from_int, operand_ty, result); - const diff = try block.addBinOp(if (block.float_mode == .optimized) .sub_optimized else .sub, operand, back); - const ok = if (is_vector) ok: { - const ok_pos = try block.addCmpVector(diff, Air.internedToRef((try sema.splat(operand_ty, try pt.floatValue(operand_scalar_ty, 1.0))).toIntern()), .lt); - const ok_neg = try block.addCmpVector(diff, Air.internedToRef((try sema.splat(operand_ty, try pt.floatValue(operand_scalar_ty, -1.0))).toIntern()), .gt); - const ok = try block.addBinOp(.bit_and, ok_pos, ok_neg); - break :ok try block.addReduce(ok, .And); - } else ok: { - const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_ty, 1.0)).toIntern())); - const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_ty, -1.0)).toIntern())); - break :ok try block.addBinOp(.bool_and, ok_pos, ok_neg); - }; - try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); - } - return result; + const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand); + if (block.wantSafety()) { + const back = try block.addTyOp(.float_from_int, operand_ty, result); + const diff = try block.addBinOp(if (block.float_mode == .optimized) .sub_optimized else .sub, operand, back); + const ok = if (is_vector) ok: { + const ok_pos = try block.addCmpVector(diff, Air.internedToRef((try sema.splat(operand_ty, try pt.floatValue(operand_scalar_ty, 1.0))).toIntern()), .lt); + const ok_neg = try block.addCmpVector(diff, Air.internedToRef((try sema.splat(operand_ty, try pt.floatValue(operand_scalar_ty, -1.0))).toIntern()), .gt); + const ok = try block.addBinOp(.bit_and, ok_pos, ok_neg); + break :ok try block.addReduce(ok, .And); + } else ok: { + const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_ty, 1.0)).toIntern())); + const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_ty, -1.0)).toIntern())); + break :ok try block.addBinOp(.bool_and, ok_pos, ok_neg); + }; + try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); } - const len = dest_ty.vectorLen(zcu); - const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); - for (new_elems, 0..) 
|*new_elem, i| { - const idx_ref = try pt.intRef(Type.usize, i); - const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); - const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem); - if (block.wantSafety()) { - const back = try block.addTyOp(.float_from_int, operand_scalar_ty, result); - const diff = try block.addBinOp(.sub, old_elem, back); - const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 1.0)).toIntern())); - const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_scalar_ty, -1.0)).toIntern())); - const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); - try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); - } - new_elem.* = result; - } - return block.addAggregateInit(dest_ty, new_elems); + return result; } fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -22386,7 +22188,6 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const operand_ty = sema.typeOf(operand); try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src); - const is_vector = dest_ty.zigTypeTag(zcu) == .vector; const dest_scalar_ty = dest_ty.scalarType(zcu); const operand_scalar_ty = operand_ty.scalarType(zcu); @@ -22402,17 +22203,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } try sema.requireRuntimeBlock(block, src, operand_src); - if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) { - return block.addTyOp(.float_from_int, dest_ty, operand); - } - const len = operand_ty.vectorLen(zcu); - const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); - for (new_elems, 0..) |*new_elem, i| { - const idx_ref = try pt.intRef(Type.usize, i); - const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); - new_elem.* = try block.addTyOp(.float_from_int, dest_scalar_ty, old_elem); - } - return block.addAggregateInit(dest_ty, new_elems); + return block.addTyOp(.float_from_int, dest_ty, operand); } fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -22431,10 +22222,10 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, uncoerced_operand_ty, src, operand_src); const is_vector = dest_ty.zigTypeTag(zcu) == .vector; - const operand_ty = if (is_vector) operand_ty: { + const operand_ty: Type = if (is_vector) operand_ty: { const len = dest_ty.vectorLen(zcu); break :operand_ty try pt.vectorType(.{ .child = .usize_type, .len = len }); - } else Type.usize; + } else .usize; const operand_coerced = try sema.coerce(block, operand_ty, operand_res, operand_src); @@ -22482,69 +22273,34 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} try sema.requireRuntimeBlock(block, src, operand_src); try sema.checkLogicalPtrOperation(block, src, ptr_ty); - if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) { - if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) { - if (!ptr_ty.isAllowzeroPtr(zcu)) { - const is_non_zero = if (is_vector) all_non_zero: { - const zero_usize = Air.internedToRef((try sema.splat(operand_ty, .zero_usize)).toIntern()); - const is_non_zero = try block.addCmpVector(operand_coerced, zero_usize, .neq); - break :all_non_zero try block.addReduce(is_non_zero, .And); - } else try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); - try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null); - } - if (ptr_align.compare(.gt, .@"1")) { - const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1; - const align_mask = Air.internedToRef((try sema.splat(operand_ty, try pt.intValue( - Type.usize, - if (elem_ty.fnPtrMaskOrNull(zcu)) |mask| - align_bytes_minus_1 & mask - else - align_bytes_minus_1, - ))).toIntern()); - const remainder = try block.addBinOp(.bit_and, operand_coerced, align_mask); - const is_aligned = if (is_vector) all_aligned: { - const splat_zero_usize = Air.internedToRef((try sema.splat(operand_ty, .zero_usize)).toIntern()); - const is_aligned = try block.addCmpVector(remainder, splat_zero_usize, .eq); - break :all_aligned try block.addReduce(is_aligned, .And); - } else try block.addBinOp(.cmp_eq, remainder, .zero_usize); - try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment); - } - } - return block.addBitCast(dest_ty, operand_coerced); - } - - const len = dest_ty.vectorLen(zcu); if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) { - for (0..len) |i| { - const idx_ref = try pt.intRef(Type.usize, i); - const elem_coerced = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref); - if (!ptr_ty.isAllowzeroPtr(zcu)) { - const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize); - try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null); - } - if (ptr_align.compare(.gt, .@"1")) { - const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1; - const align_mask = Air.internedToRef((try pt.intValue( - Type.usize, - if (elem_ty.fnPtrMaskOrNull(zcu)) |mask| - align_bytes_minus_1 & mask - else - align_bytes_minus_1, - )).toIntern()); - const remainder = try block.addBinOp(.bit_and, elem_coerced, align_mask); - const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); - try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment); - } + if (!ptr_ty.isAllowzeroPtr(zcu)) { + const is_non_zero = if (is_vector) all_non_zero: { + const zero_usize = Air.internedToRef((try sema.splat(operand_ty, .zero_usize)).toIntern()); + const is_non_zero = try block.addCmpVector(operand_coerced, zero_usize, .neq); + break :all_non_zero try block.addReduce(is_non_zero, .And); + } else try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); + try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null); + } + if (ptr_align.compare(.gt, .@"1")) { + const align_bytes_minus_1 = ptr_align.toByteUnits().? 
- 1; + const align_mask = Air.internedToRef((try sema.splat(operand_ty, try pt.intValue( + .usize, + if (elem_ty.fnPtrMaskOrNull(zcu)) |mask| + align_bytes_minus_1 & mask + else + align_bytes_minus_1, + ))).toIntern()); + const remainder = try block.addBinOp(.bit_and, operand_coerced, align_mask); + const is_aligned = if (is_vector) all_aligned: { + const splat_zero_usize = Air.internedToRef((try sema.splat(operand_ty, .zero_usize)).toIntern()); + const is_aligned = try block.addCmpVector(remainder, splat_zero_usize, .eq); + break :all_aligned try block.addReduce(is_aligned, .And); + } else try block.addBinOp(.cmp_eq, remainder, .zero_usize); + try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment); } } - - const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); - for (new_elems, 0..) |*new_elem, i| { - const idx_ref = try pt.intRef(Type.usize, i); - const old_elem = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref); - new_elem.* = try block.addBitCast(ptr_ty, old_elem); - } - return block.addAggregateInit(dest_ty, new_elems); + return block.addBitCast(dest_ty, operand_coerced); } fn ptrFromIntVal( @@ -22918,12 +22674,12 @@ fn ptrCastFull( } check_child: { - const src_child = if (dest_info.flags.size == .slice and src_info.flags.size == .one) blk: { + const src_child: Type = if (dest_info.flags.size == .slice and src_info.flags.size == .one) blk: { // *[n]T -> []T break :blk Type.fromInterned(src_info.child).childType(zcu); - } else Type.fromInterned(src_info.child); + } else .fromInterned(src_info.child); - const dest_child = Type.fromInterned(dest_info.child); + const dest_child: Type = .fromInterned(dest_info.child); const imc_res = try sema.coerceInMemoryAllowed( block, @@ -22956,7 +22712,7 @@ fn ptrCastFull( } if (is_array_ptr_to_slice) { // [*]nT -> []T - const arr_ty = Type.fromInterned(src_info.child); + const arr_ty: Type = .fromInterned(src_info.child); if (arr_ty.sentinel(zcu)) |src_sentinel| { const coerced_sent = try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_sentinel.toIntern(), dest_info.child); if (dest_info.sentinel == coerced_sent) break :check_sent; @@ -23158,7 +22914,7 @@ fn ptrCastFull( if (dest_info.flags.size == .slice) { // Because the operand is comptime-known and not `null`, the slice length has already been computed: const len: Value = switch (dest_slice_len.?) { - .undef => try pt.undefValue(.usize), + .undef => .undef_usize, .constant => |n| try pt.intValue(.usize, n), .equal_runtime_src_slice => unreachable, .change_runtime_src_slice => unreachable, @@ -23267,7 +23023,7 @@ fn ptrCastFull( if (need_align_check) { assert(operand_ptr_int != .none); const align_mask = try pt.intRef(.usize, mask: { - const target_ptr_mask: u64 = Type.fromInterned(dest_info.child).fnPtrMaskOrNull(zcu) orelse ~@as(u64, 0); + const target_ptr_mask = Type.fromInterned(dest_info.child).fnPtrMaskOrNull(zcu) orelse ~@as(u64, 0); break :mask (dest_align.toByteUnits().? - 1) & target_ptr_mask; }); const ptr_masked = try block.addBinOp(.bit_and, operand_ptr_int, align_mask); @@ -23288,7 +23044,7 @@ fn ptrCastFull( assert(need_operand_ptr); const result_len: Air.Inst.Ref = switch (dest_slice_len.?) { - .undef => try pt.undefRef(.usize), + .undef => .undef_usize, .constant => |n| try pt.intRef(.usize, n), .equal_runtime_src_slice => len: { assert(need_operand_len); @@ -23658,13 +23414,13 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const offset = try sema.bitOffsetOf(block, inst); - return sema.pt.intRef(Type.comptime_int, offset); + return sema.pt.intRef(.comptime_int, offset); } fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const offset = try sema.bitOffsetOf(block, inst); // TODO reminder to make this a compile error for packed structs - return sema.pt.intRef(Type.comptime_int, offset / 8); + return sema.pt.intRef(.comptime_int, offset / 8); } fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 { @@ -23705,7 +23461,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 if (i == field_index) { return bit_sum; } - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]); bit_sum += field_ty.bitSize(zcu); } else unreachable; }, @@ -24497,8 +24253,8 @@ fn analyzeShuffle( block: *Block, src_node: std.zig.Ast.Node.Offset, elem_ty: Type, - a_arg: Air.Inst.Ref, - b_arg: Air.Inst.Ref, + a_uncoerced: Air.Inst.Ref, + b_uncoerced: Air.Inst.Ref, mask: Value, mask_len: u32, ) CompileError!Air.Inst.Ref { @@ -24507,150 +24263,154 @@ fn analyzeShuffle( const a_src = block.builtinCallArgSrc(src_node, 1); const b_src = block.builtinCallArgSrc(src_node, 2); const mask_src = block.builtinCallArgSrc(src_node, 3); - var a = a_arg; - var b = b_arg; - const res_ty = try pt.vectorType(.{ - .len = mask_len, - .child = elem_ty.toIntern(), - }); - - const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(zcu)) { - .array, .vector => sema.typeOf(a).arrayLen(zcu), - .undefined => null, - else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{ - elem_ty.fmt(pt), - sema.typeOf(a).fmt(pt), - }), + // If the type of `a` is `@Type(.undefined)`, i.e. the argument is untyped, this is 0, because it is an error to index into this vector. + const a_len: u32 = switch (sema.typeOf(a_uncoerced).zigTypeTag(zcu)) { + .array, .vector => @intCast(sema.typeOf(a_uncoerced).arrayLen(zcu)), + .undefined => 0, + else => return sema.fail(block, a_src, "expected vector of '{}', found '{}'", .{ elem_ty.fmt(pt), sema.typeOf(a_uncoerced).fmt(pt) }), }; - const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(zcu)) { - .array, .vector => sema.typeOf(b).arrayLen(zcu), - .undefined => null, - else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{ - elem_ty.fmt(pt), - sema.typeOf(b).fmt(pt), - }), + const a_ty = try pt.vectorType(.{ .len = a_len, .child = elem_ty.toIntern() }); + const a_coerced = try sema.coerce(block, a_ty, a_uncoerced, a_src); + + // If the type of `b` is `@Type(.undefined)`, i.e. the argument is untyped, this is 0, because it is an error to index into this vector. 
+ const b_len: u32 = switch (sema.typeOf(b_uncoerced).zigTypeTag(zcu)) {
+ .array, .vector => @intCast(sema.typeOf(b_uncoerced).arrayLen(zcu)),
+ .undefined => 0,
+ else => return sema.fail(block, b_src, "expected vector of '{}', found '{}'", .{ elem_ty.fmt(pt), sema.typeOf(b_uncoerced).fmt(pt) }),
};
- if (maybe_a_len == null and maybe_b_len == null) {
- return pt.undefRef(res_ty);
- }
- const a_len: u32 = @intCast(maybe_a_len orelse maybe_b_len.?);
- const b_len: u32 = @intCast(maybe_b_len orelse a_len);
+ const b_ty = try pt.vectorType(.{ .len = b_len, .child = elem_ty.toIntern() });
+ const b_coerced = try sema.coerce(block, b_ty, b_uncoerced, b_src);
- const a_ty = try pt.vectorType(.{
- .len = a_len,
- .child = elem_ty.toIntern(),
- });
- const b_ty = try pt.vectorType(.{
- .len = b_len,
- .child = elem_ty.toIntern(),
- });
+ const result_ty = try pt.vectorType(.{ .len = mask_len, .child = elem_ty.toIntern() });
- if (maybe_a_len == null) a = try pt.undefRef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
- if (maybe_b_len == null) b = try pt.undefRef(b_ty) else b = try sema.coerce(block, b_ty, b, b_src);
+ // We're going to pre-emptively reserve space in `sema.air_extra`. The reason for this is we need
+ // a `u32` buffer of length `mask_len` anyway, and putting it in `sema.air_extra` avoids a copy
+ // in the runtime case. If the result is comptime-known, we'll shrink `air_extra` back.
+ const air_extra_idx: u32 = @intCast(sema.air_extra.items.len);
+ const air_mask_buf = try sema.air_extra.addManyAsSlice(sema.gpa, mask_len);
- const operand_info = [2]std.meta.Tuple(&.{ u64, LazySrcLoc, Type }){
- .{ a_len, a_src, a_ty },
- .{ b_len, b_src, b_ty },
- };
+ // We want to interpret that buffer in `air_extra` in a few ways. Initially, we'll consider its
+ // elements as `Air.ShuffleTwoMask`, essentially representing the raw mask values; then, we'll
+ // convert it to `InternPool.Index` or `Air.ShuffleOneMask` if there are comptime-known operands.
+ const mask_ip_index: []InternPool.Index = @ptrCast(air_mask_buf);
+ const mask_shuffle_one: []Air.ShuffleOneMask = @ptrCast(air_mask_buf);
+ const mask_shuffle_two: []Air.ShuffleTwoMask = @ptrCast(air_mask_buf);
- for (0..@intCast(mask_len)) |i| {
- const elem = try mask.elemValue(pt, i);
- if (elem.isUndef(zcu)) continue;
- const elem_resolved = try sema.resolveLazyValue(elem);
- const int = elem_resolved.toSignedInt(zcu);
- var unsigned: u32 = undefined;
- var chosen: u32 = undefined;
- if (int >= 0) {
- unsigned = @intCast(int);
- chosen = 0;
- } else {
- unsigned = @intCast(~int);
- chosen = 1;
+ // Initial loop: check mask elements, populate `mask_shuffle_two`. 
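+ // For example, `@shuffle(u8, a, b, [3]i32{ 0, ~@as(i32, 1), -1 })` makes this
+ // loop produce `.aElem(0)` (a[0]), `.bElem(1)` (since ~(-2) == 1, i.e. b[1]),
+ // and `.bElem(0)` (since ~(-1) == 0, i.e. b[0]); an undef mask element
+ // becomes `.undef`.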
+ var a_used = false; + var b_used = false; + for (mask_shuffle_two, 0..mask_len) |*out, mask_idx| { + const mask_val = try mask.elemValue(pt, mask_idx); + if (mask_val.isUndef(zcu)) { + out.* = .undef; + continue; } - if (unsigned >= operand_info[chosen][0]) { - const msg = msg: { - const msg = try sema.errMsg(mask_src, "mask index '{d}' has out-of-bounds selection", .{i}); + // Safe because mask elements are `i32` and we already checked for undef: + const raw = (try sema.resolveLazyValue(mask_val)).toSignedInt(zcu); + if (raw >= 0) { + const idx: u32 = @intCast(raw); + a_used = true; + out.* = .aElem(idx); + if (idx >= a_len) return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(mask_src, "mask element at index '{d}' selects out-of-bounds index", .{mask_idx}); errdefer msg.destroy(sema.gpa); - - try sema.errNote(operand_info[chosen][1], msg, "selected index '{d}' out of bounds of '{}'", .{ - unsigned, - operand_info[chosen][2].fmt(pt), - }); - - if (chosen == 0) { - try sema.errNote(b_src, msg, "selections from the second vector are specified with negative numbers", .{}); + try sema.errNote(a_src, msg, "index '{d}' exceeds bounds of '{}' given here", .{ idx, a_ty.fmt(pt) }); + if (idx < b_len) { + try sema.errNote(b_src, msg, "use '~@as(u32, {d})' to index into second vector given here", .{idx}); } - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); - } - } - - if (try sema.resolveValue(a)) |a_val| { - if (try sema.resolveValue(b)) |b_val| { - const values = try sema.arena.alloc(InternPool.Index, mask_len); - for (values, 0..) |*value, i| { - const mask_elem_val = try mask.elemValue(pt, i); - if (mask_elem_val.isUndef(zcu)) { - value.* = try pt.intern(.{ .undef = elem_ty.toIntern() }); - continue; - } - const int = mask_elem_val.toSignedInt(zcu); - const unsigned: u32 = @intCast(if (int >= 0) int else ~int); - values[i] = (try (if (int >= 0) a_val else b_val).elemValue(pt, unsigned)).toIntern(); - } - return Air.internedToRef((try pt.intern(.{ .aggregate = .{ - .ty = res_ty.toIntern(), - .storage = .{ .elems = values }, - } }))); - } - } - - // All static analysis passed, and not comptime. - // For runtime codegen, vectors a and b must be the same length. Here we - // recursively @shuffle the smaller vector to append undefined elements - // to it up to the length of the longer vector. This recursion terminates - // in 1 call because these calls to analyzeShuffle guarantee a_len == b_len. 
- if (a_len != b_len) {
- const min_len = @min(a_len, b_len);
- const max_src = if (a_len > b_len) a_src else b_src;
- const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len));
-
- const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
- for (@intCast(0)..@intCast(min_len)) |i| {
- expand_mask_values[i] = (try pt.intValue(Type.comptime_int, i)).toIntern();
- }
- for (@intCast(min_len)..@intCast(max_len)) |i| {
- expand_mask_values[i] = (try pt.intValue(Type.comptime_int, -1)).toIntern();
- }
- const expand_mask = try pt.intern(.{ .aggregate = .{
- .ty = (try pt.vectorType(.{ .len = @intCast(max_len), .child = .comptime_int_type })).toIntern(),
- .storage = .{ .elems = expand_mask_values },
- } });
-
- if (a_len < b_len) {
- const undef = try pt.undefRef(a_ty);
- a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, Value.fromInterned(expand_mask), @intCast(max_len));
+ } else {
- const undef = try pt.undefRef(b_ty);
- b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, Value.fromInterned(expand_mask), @intCast(max_len));
+ const idx: u32 = @intCast(~raw);
+ b_used = true;
+ out.* = .bElem(idx);
+ if (idx >= b_len) return sema.failWithOwnedErrorMsg(block, msg: {
+ const msg = try sema.errMsg(mask_src, "mask element at index '{d}' selects out-of-bounds index", .{mask_idx});
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(b_src, msg, "index '{d}' exceeds bounds of '{}' given here", .{ idx, b_ty.fmt(pt) });
+ break :msg msg;
+ });
}
}
- return block.addInst(.{
- .tag = .shuffle,
- .data = .{ .ty_pl = .{
- .ty = Air.internedToRef(res_ty.toIntern()),
- .payload = try block.sema.addExtra(Air.Shuffle{
- .a = a,
- .b = b,
- .mask = mask.toIntern(),
- .mask_len = mask_len,
- }),
- } },
- });
+ const maybe_a_val = try sema.resolveValue(a_coerced);
+ const maybe_b_val = try sema.resolveValue(b_coerced);
+
+ const a_rt = a_used and maybe_a_val == null;
+ const b_rt = b_used and maybe_b_val == null;
+
+ if (a_rt and b_rt) {
+ // Both operands are needed and runtime-known. We need a `[]ShuffleTwoMask`... which is
+ // exactly what we already have in `mask_shuffle_two`! So, we're basically done already.
+ // We just need to append the two operands.
+ try sema.air_extra.ensureUnusedCapacity(sema.gpa, 2);
+ sema.appendRefsAssumeCapacity(&.{ a_coerced, b_coerced });
+ return block.addInst(.{
+ .tag = .shuffle_two,
+ .data = .{ .ty_pl = .{
+ .ty = Air.internedToRef(result_ty.toIntern()),
+ .payload = air_extra_idx,
+ } },
+ });
+ } else if (a_rt) {
+ // We need to convert the `ShuffleTwoMask` values to `ShuffleOneMask`.
+ for (mask_shuffle_two, mask_shuffle_one) |in, *out| {
+ out.* = switch (in.unwrap()) {
+ .undef => .value(try pt.undefValue(elem_ty)),
+ .a_elem => |idx| .elem(idx),
+ .b_elem => |idx| .value(try maybe_b_val.?.elemValue(pt, idx)),
+ };
+ }
+ // Now just append our single runtime operand, and we're done.
+ try sema.air_extra.ensureUnusedCapacity(sema.gpa, 1);
+ sema.appendRefsAssumeCapacity(&.{a_coerced});
+ return block.addInst(.{
+ .tag = .shuffle_one,
+ .data = .{ .ty_pl = .{
+ .ty = Air.internedToRef(result_ty.toIntern()),
+ .payload = air_extra_idx,
+ } },
+ });
+ } else if (b_rt) {
+ // We need to convert the `ShuffleTwoMask` values to `ShuffleOneMask`. 
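+ // As in the `a_rt` branch above, with the operand roles swapped: elements of
+ // the comptime-known `a` are baked into the mask as values, while elements of
+ // the runtime operand `b` stay as symbolic indices.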
+ for (mask_shuffle_two, mask_shuffle_one) |in, *out| { + out.* = switch (in.unwrap()) { + .undef => .value(try pt.undefValue(elem_ty)), + .a_elem => |idx| .value(try maybe_a_val.?.elemValue(pt, idx)), + .b_elem => |idx| .elem(idx), + }; + } + // Now just append our single runtime operand, and we're done. + try sema.air_extra.ensureUnusedCapacity(sema.gpa, 1); + sema.appendRefsAssumeCapacity(&.{b_coerced}); + return block.addInst(.{ + .tag = .shuffle_one, + .data = .{ .ty_pl = .{ + .ty = Air.internedToRef(result_ty.toIntern()), + .payload = air_extra_idx, + } }, + }); + } else { + // The result will be comptime-known. We must convert the `ShuffleTwoMask` values to + // `InternPool.Index` values using the known operands. + for (mask_shuffle_two, mask_ip_index) |in, *out| { + const val: Value = switch (in.unwrap()) { + .undef => try pt.undefValue(elem_ty), + .a_elem => |idx| try maybe_a_val.?.elemValue(pt, idx), + .b_elem => |idx| try maybe_b_val.?.elemValue(pt, idx), + }; + out.* = val.toIntern(); + } + const res = try pt.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = mask_ip_index }, + } }); + // We have a comptime-known result, so didn't need `air_mask_buf` -- remove it from `sema.air_extra`. + assert(sema.air_extra.items.len == air_extra_idx + air_mask_buf.len); + sema.air_extra.shrinkRetainingCapacity(air_extra_idx); + return Air.internedToRef(res); + } } fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -25087,7 +24847,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins if (parent_ptr_info.flags.size != .one) { return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(pt)}); } - const parent_ty = Type.fromInterned(parent_ptr_info.child); + const parent_ty: Type = .fromInterned(parent_ptr_info.child); switch (parent_ty.zigTypeTag(zcu)) { .@"struct", .@"union" => {}, else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(pt)}), @@ -25741,7 +25501,7 @@ fn zirMemcpy( if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| { len_val = dest_len_val; if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| { - if (!(try sema.valuesEqual(dest_len_val, src_len_val, Type.usize))) { + if (!(try sema.valuesEqual(dest_len_val, src_len_val, .usize))) { const msg = msg: { const msg = try sema.errMsg(src, "non-matching copy lengths", .{}); errdefer msg.destroy(sema.gpa); @@ -25952,7 +25712,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_elem_ty: Type = dest_elem_ty: { const ptr_info = dest_ptr_ty.ptrInfo(zcu); switch (ptr_info.flags.size) { - .slice => break :dest_elem_ty Type.fromInterned(ptr_info.child), + .slice => break :dest_elem_ty .fromInterned(ptr_info.child), .one => { if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .array) { break :dest_elem_ty Type.fromInterned(ptr_info.child).childType(zcu); @@ -26118,7 +25878,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A extra_index += body.len; if (extra.data.bits.ret_ty_is_generic) break :blk .generic_poison; - const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, .{ .simple = .function_ret_ty }); + const val = try sema.resolveGenericBody(block, ret_src, body, inst, .type, .{ .simple = .function_ret_ty }); const ty = val.toType(); break :blk ty; } else if 
(extra.data.bits.has_ret_ty_ref) blk: { @@ -26129,7 +25889,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const ret_ty_air_ref = try sema.resolveInst(ret_ty_ref); const ret_ty_val = try sema.resolveConstDefinedValue(block, ret_src, ret_ty_air_ref, .{ .simple = .function_ret_ty }); break :blk ret_ty_val.toType(); - } else Type.void; + } else .void; const noalias_bits: u32 = if (extra.data.bits.has_any_noalias) blk: { const x = sema.code.extra[extra_index]; @@ -26223,7 +25983,7 @@ fn zirWasmMemorySize( return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } - const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.operand, Type.u32, .{ .simple = .wasm_memory_index })); + const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.operand, .u32, .{ .simple = .wasm_memory_index })); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ .tag = .wasm_memory_size, @@ -26248,8 +26008,8 @@ fn zirWasmMemoryGrow( return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } - const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, .{ .simple = .wasm_memory_index })); - const delta = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.rhs), delta_src); + const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.lhs, .u32, .{ .simple = .wasm_memory_index })); + const delta = try sema.coerce(block, .usize, try sema.resolveInst(extra.rhs), delta_src); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ @@ -26484,7 +26244,7 @@ fn zirWorkItem( }, } - const dimension: u32 = @intCast(try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, .{ .simple = .work_group_dim_index })); + const dimension: u32 = @intCast(try sema.resolveInt(block, dimension_src, extra.operand, .u32, .{ .simple = .work_group_dim_index })); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ @@ -26552,7 +26312,7 @@ fn zirBuiltinValue(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const inline_tag_val = try pt.enumValue( callconv_tag_ty, (try pt.intValue( - Type.u8, + .u8, @intFromEnum(std.builtin.CallingConvention.@"inline"), )).toIntern(), ); @@ -26760,7 +26520,7 @@ fn explainWhyTypeIsComptimeInner( if (zcu.typeToStruct(ty)) |struct_type| { for (0..struct_type.field_types.len) |i| { - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]); const field_src: LazySrcLoc = .{ .base_node_inst = struct_type.zir_index, .offset = .{ .container_field_type = @intCast(i) }, @@ -26780,7 +26540,7 @@ fn explainWhyTypeIsComptimeInner( if (zcu.typeToUnion(ty)) |union_obj| { for (0..union_obj.field_types.len) |i| { - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[i]); const field_src: LazySrcLoc = .{ .base_node_inst = union_obj.zir_index, .offset = .{ .container_field_type = @intCast(i) }, @@ -27171,7 +26931,7 @@ fn addSafetyCheckUnwrapError( defer fail_block.instructions.deinit(gpa); - const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand); + const err = try fail_block.addTyOp(unwrap_err_tag, .anyerror, 
operand); try safetyPanicUnwrapError(sema, &fail_block, src, err); try sema.addSafetyCheckExtra(parent_block, ok, &fail_block); @@ -27344,7 +27104,7 @@ fn fieldVal( switch (inner_ty.zigTypeTag(zcu)) { .array => { if (field_name.eqlSlice("len", ip)) { - return Air.internedToRef((try pt.intValue(Type.usize, inner_ty.arrayLen(zcu))).toIntern()); + return Air.internedToRef((try pt.intValue(.usize, inner_ty.arrayLen(zcu))).toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(zcu); const result_ty = try pt.ptrTypeSema(.{ @@ -27527,7 +27287,7 @@ fn fieldPtr( switch (inner_ty.zigTypeTag(zcu)) { .array => { if (field_name.eqlSlice("len", ip)) { - const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(zcu)); + const int_val = try pt.intValue(.usize, inner_ty.arrayLen(zcu)); return uavRef(sema, int_val.toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(zcu); @@ -27769,12 +27529,12 @@ fn fieldCallBind( if (zcu.typeToStruct(concrete_ty)) |struct_type| { const field_index = struct_type.nameIndex(ip, field_name) orelse break :find_field; - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]); return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr); } else if (concrete_ty.isTuple(zcu)) { if (field_name.eqlSlice("len", ip)) { - return .{ .direct = try pt.intRef(Type.usize, concrete_ty.structFieldCount(zcu)) }; + return .{ .direct = try pt.intRef(.usize, concrete_ty.structFieldCount(zcu)) }; } if (field_name.toUnsigned(ip)) |field_index| { if (field_index >= concrete_ty.structFieldCount(zcu)) break :find_field; @@ -27817,7 +27577,7 @@ fn fieldCallBind( if (zcu.typeToFunc(decl_type)) |func_type| f: { if (func_type.param_types.len == 0) break :f; - const first_param_type = Type.fromInterned(func_type.param_types.get(ip)[0]); + const first_param_type: Type = .fromInterned(func_type.param_types.get(ip)[0]); if (first_param_type.isGenericPoison() or (first_param_type.zigTypeTag(zcu) == .pointer and (first_param_type.ptrSize(zcu) == .one or @@ -28003,7 +27763,7 @@ fn structFieldPtr( if (struct_ty.isTuple(zcu)) { if (field_name.eqlSlice("len", ip)) { - const len_inst = try pt.intRef(Type.usize, struct_ty.structFieldCount(zcu)); + const len_inst = try pt.intRef(.usize, struct_ty.structFieldCount(zcu)); return sema.analyzeRef(block, src, len_inst); } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); @@ -28134,7 +27894,7 @@ fn structFieldVal( return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]); } - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]); if (try sema.typeHasOnePossibleValue(field_ty)) |field_val| return Air.internedToRef(field_val.toIntern()); @@ -28167,7 +27927,7 @@ fn tupleFieldVal( const pt = sema.pt; const zcu = pt.zcu; if (field_name.eqlSlice("len", &zcu.intern_pool)) { - return pt.intRef(Type.usize, tuple_ty.structFieldCount(zcu)); + return pt.intRef(.usize, tuple_ty.structFieldCount(zcu)); } const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, tuple_byval, field_index, tuple_ty); @@ -28220,7 +27980,7 @@ fn tupleFieldValByIndex( return switch 
(zcu.intern_pool.indexToKey(tuple_val.toIntern())) { .undef => pt.undefRef(field_ty), .aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) { - .bytes => |bytes| try pt.intValue(Type.u8, bytes.at(field_index, &zcu.intern_pool)), + .bytes => |bytes| try pt.intValue(.u8, bytes.at(field_index, &zcu.intern_pool)), .elems => |elems| Value.fromInterned(elems[field_index]), .repeated_elem => |elem| Value.fromInterned(elem), }.toIntern()), @@ -28253,7 +28013,7 @@ fn unionFieldPtr( try union_ty.resolveFields(pt); const union_obj = zcu.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]); const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ @@ -28295,8 +28055,8 @@ fn unionFieldPtr( break :ct; } // Store to the union to initialize the tag. - const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); - const payload_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); + const field_tag = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const payload_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]); const new_union_val = try pt.unionValue(union_ty, field_tag, try pt.undefValue(payload_ty)); try sema.storePtrVal(block, src, union_ptr_val, new_union_val, union_ty); } else { @@ -28306,7 +28066,7 @@ fn unionFieldPtr( return sema.failWithUseOfUndef(block, src); } const un = ip.indexToKey(union_val.toIntern()).un; - const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const field_tag = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); if (!tag_matches) { const msg = msg: { @@ -28332,11 +28092,11 @@ fn unionFieldPtr( if (!initializing and union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1) { - const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const wanted_tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index); const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern()); // TODO would it be better if get_union_tag supported pointers to unions? 
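+ // A sketch of what this check guards against (illustrative, assuming a safe
+ // build and an auto-layout tagged union; not code from this change):
+ //
+ //     var u: union(enum) { x: u32, y: f32 } = .{ .x = 1 };
+ //     const p = &u.y; // safety panic: access of inactive union field
+ //     _ = p;
+ //
+ // The union is loaded and its active tag compared against the wanted tag.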
const union_val = try block.addTyOp(.load, union_ty, union_ptr); - const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_val); + const active_tag = try block.addTyOp(.get_union_tag, .fromInterned(union_obj.enum_tag_ty), union_val); try sema.addSafetyCheckInactiveUnionField(block, src, active_tag, wanted_tag); } if (field_ty.zigTypeTag(zcu) == .noreturn) { @@ -28363,14 +28123,14 @@ fn unionFieldVal( try union_ty.resolveFields(pt); const union_obj = zcu.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]); const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, zcu).?); if (try sema.resolveValue(union_byval)) |union_val| { if (union_val.isUndef(zcu)) return pt.undefRef(field_ty); const un = ip.indexToKey(union_val.toIntern()).un; - const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const field_tag = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); switch (union_obj.flagsUnordered(ip).layout) { .auto => { @@ -28408,9 +28168,9 @@ fn unionFieldVal( if (union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1) { - const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); + const wanted_tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index); const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern()); - const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval); + const active_tag = try block.addTyOp(.get_union_tag, .fromInterned(union_obj.enum_tag_ty), union_byval); try sema.addSafetyCheckInactiveUnionField(block, src, active_tag, wanted_tag); } if (field_ty.zigTypeTag(zcu) == .noreturn) { @@ -28540,7 +28300,7 @@ fn elemVal( // TODO in case of a vector of pointers, we need to detect whether the element // index is a scalar or vector instead of unconditionally casting to usize. - const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src); + const elem_index = try sema.coerce(block, .usize, elem_index_uncasted, elem_index_src); switch (indexable_ty.zigTypeTag(zcu)) { .pointer => switch (indexable_ty.ptrSize(zcu)) { @@ -28795,7 +28555,7 @@ fn elemValArray( if (oob_safety and block.wantSafety()) { // Runtime check is only needed if unable to comptime check. if (maybe_index_val == null) { - const len_inst = try pt.intRef(Type.usize, array_len); + const len_inst = try pt.intRef(.usize, array_len); const cmp_op: Air.Inst.Tag = if (array_sent != null) .cmp_lte else .cmp_lt; try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op); } @@ -28860,7 +28620,7 @@ fn elemPtrArray( // Runtime check is only needed if unable to comptime check. 
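+ // For a sentinel-terminated array such as `[4:0]u8`, index 4 (the sentinel)
+ // remains addressable, so the emitted bounds check below is `.cmp_lte`
+ // against the length; without a sentinel it is the strict `.cmp_lt`.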
if (oob_safety and block.wantSafety() and offset == null) { - const len_inst = try pt.intRef(Type.usize, array_len); + const len_inst = try pt.intRef(.usize, array_len); const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt; try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op); } @@ -28917,9 +28677,9 @@ fn elemValSlice( if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| - try pt.intRef(Type.usize, try slice_val.sliceLen(pt)) + try pt.intRef(.usize, try slice_val.sliceLen(pt)) else - try block.addTyOp(.slice_len, Type.usize, slice); + try block.addTyOp(.slice_len, .usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op); } @@ -28976,8 +28736,8 @@ fn elemPtrSlice( const len_inst = len: { if (maybe_undef_slice_val) |slice_val| if (!slice_val.isUndef(zcu)) - break :len try pt.intRef(Type.usize, try slice_val.sliceLen(pt)); - break :len try block.addTyOp(.slice_len, Type.usize, slice); + break :len try pt.intRef(.usize, try slice_val.sliceLen(pt)); + break :len try block.addTyOp(.slice_len, .usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op); @@ -29142,7 +28902,7 @@ fn coerceExtra( if (!inst_ty.isSinglePointer(zcu)) break :single_item; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const ptr_elem_ty = inst_ty.childType(zcu); - const array_ty = Type.fromInterned(dest_info.child); + const array_ty: Type = .fromInterned(dest_info.child); if (array_ty.zigTypeTag(zcu) != .array) break :single_item; const array_elem_ty = array_ty.childType(zcu); if (array_ty.arrayLen(zcu) != 1) break :single_item; @@ -29164,7 +28924,7 @@ fn coerceExtra( const array_elem_type = array_ty.childType(zcu); const dest_is_mut = !dest_info.flags.is_const; - const dst_elem_type = Type.fromInterned(dest_info.child); + const dst_elem_type: Type = .fromInterned(dest_info.child); const elem_res = try sema.coerceInMemoryAllowed(block, dst_elem_type, array_elem_type, dest_is_mut, target, dest_ty_src, inst_src, maybe_inst_val); switch (elem_res) { .ok => {}, @@ -29225,7 +28985,7 @@ fn coerceExtra( // could be null. 
const src_elem_ty = inst_ty.childType(zcu); const dest_is_mut = !dest_info.flags.is_const; - const dst_elem_type = Type.fromInterned(dest_info.child); + const dst_elem_type: Type = .fromInterned(dest_info.child); switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src, maybe_inst_val)) { .ok => {}, else => break :src_c_ptr, @@ -29265,16 +29025,16 @@ fn coerceExtra( .byte_offset = 0, } })), .comptime_int => { - const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { + const addr = sema.coerceExtra(block, .usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { error.NotCoercible => break :pointer, else => |e| return e, }; return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); }, .int => { - const ptr_size_ty = switch (inst_ty.intInfo(zcu).signedness) { - .signed => Type.isize, - .unsigned => Type.usize, + const ptr_size_ty: Type = switch (inst_ty.intInfo(zcu).signedness) { + .signed => .isize, + .unsigned => .usize, }; const addr = sema.coerceExtra(block, ptr_size_ty, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { error.NotCoercible => { @@ -29291,8 +29051,8 @@ fn coerceExtra( const inst_info = inst_ty.ptrInfo(zcu); switch (try sema.coerceInMemoryAllowed( block, - Type.fromInterned(dest_info.child), - Type.fromInterned(inst_info.child), + .fromInterned(dest_info.child), + .fromInterned(inst_info.child), !dest_info.flags.is_const, target, dest_ty_src, @@ -29305,7 +29065,7 @@ fn coerceExtra( if (inst_info.flags.size == .slice) { assert(dest_info.sentinel == .none); if (inst_info.sentinel == .none or - inst_info.sentinel != (try pt.intValue(Type.fromInterned(inst_info.child), 0)).toIntern()) + inst_info.sentinel != (try pt.intValue(.fromInterned(inst_info.child), 0)).toIntern()) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -29364,8 +29124,8 @@ fn coerceExtra( switch (try sema.coerceInMemoryAllowed( block, - Type.fromInterned(dest_info.child), - Type.fromInterned(inst_info.child), + .fromInterned(dest_info.child), + .fromInterned(inst_info.child), !dest_info.flags.is_const, target, dest_ty_src, @@ -29378,7 +29138,7 @@ fn coerceExtra( if (dest_info.sentinel == .none or inst_info.sentinel == .none or Air.internedToRef(dest_info.sentinel) != - try sema.coerceInMemory(Value.fromInterned(inst_info.sentinel), Type.fromInterned(dest_info.child))) + try sema.coerceInMemory(Value.fromInterned(inst_info.sentinel), .fromInterned(dest_info.child))) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -30658,8 +30418,8 @@ fn coerceInMemoryAllowedPtrs( } }; } - const dest_child = Type.fromInterned(dest_info.child); - const src_child = Type.fromInterned(src_info.child); + const dest_child: Type = .fromInterned(dest_info.child); + const src_child: Type = .fromInterned(src_info.child); const child = try sema.coerceInMemoryAllowed( block, dest_child, @@ -30731,7 +30491,7 @@ fn coerceInMemoryAllowedPtrs( .none => Value.@"unreachable", else => Value.fromInterned(dest_info.sentinel), }, - .ty = Type.fromInterned(dest_info.child), + .ty = .fromInterned(dest_info.child), } }; } @@ -30794,8 +30554,8 @@ fn coerceVarArgParam( const inst_bits = uncasted_ty.floatBits(target); if (inst_bits >= double_bits) break :float inst; switch (double_bits) { - 32 => break :float try sema.coerce(block, Type.f32, inst, inst_src), - 64 => break :float try sema.coerce(block, 
Type.f64, inst, inst_src), + 32 => break :float try sema.coerce(block, .f32, inst, inst_src), + 64 => break :float try sema.coerce(block, .f64, inst, inst_src), else => unreachable, } }, @@ -30807,22 +30567,22 @@ fn coerceVarArgParam( .signed => .int, .unsigned => .uint, })) break :int try sema.coerce(block, switch (uncasted_info.signedness) { - .signed => Type.c_int, - .unsigned => Type.c_uint, + .signed => .c_int, + .unsigned => .c_uint, }, inst, inst_src); if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) { .signed => .long, .unsigned => .ulong, })) break :int try sema.coerce(block, switch (uncasted_info.signedness) { - .signed => Type.c_long, - .unsigned => Type.c_ulong, + .signed => .c_long, + .unsigned => .c_ulong, }, inst, inst_src); if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) { .signed => .longlong, .unsigned => .ulonglong, })) break :int try sema.coerce(block, switch (uncasted_info.signedness) { - .signed => Type.c_longlong, - .unsigned => Type.c_ulonglong, + .signed => .c_longlong, + .unsigned => .c_ulonglong, }, inst, inst_src); break :int inst; } else inst, @@ -30889,7 +30649,7 @@ fn storePtr2( while (i < field_count) : (i += 1) { const elem_src = operand_src; // TODO better source location const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i); - const elem_index = try pt.intRef(Type.usize, i); + const elem_index = try pt.intRef(.usize, i); const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true); try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store); } @@ -31216,7 +30976,7 @@ fn coerceArrayPtrToSlice( const slice_val = try pt.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), .ptr = slice_ptr.toIntern(), - .len = (try pt.intValue(Type.usize, array_ty.arrayLen(zcu))).toIntern(), + .len = (try pt.intValue(.usize, array_ty.arrayLen(zcu))).toIntern(), } }); return Air.internedToRef(slice_val); } @@ -31358,7 +31118,7 @@ fn coerceEnumToUnion( }; const union_obj = zcu.typeToUnion(union_ty).?; - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]); try field_ty.resolveFields(pt); if (field_ty.zigTypeTag(zcu) == .noreturn) { const msg = msg: { @@ -31448,7 +31208,7 @@ fn coerceEnumToUnion( for (0..union_obj.field_types.len) |field_index| { const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index]; - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]); if (!(try field_ty.hasRuntimeBitsSema(pt))) continue; try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{ field_name.fmt(ip), @@ -31536,7 +31296,7 @@ fn coerceArrayLike( var runtime_src: ?LazySrcLoc = null; for (element_vals, element_refs, 0..) 
|*val, *ref, i| { - const index_ref = Air.internedToRef((try pt.intValue(Type.usize, i)).toIntern()); + const index_ref = Air.internedToRef((try pt.intValue(.usize, i)).toIntern()); const src = inst_src; // TODO better source location const elem_src = inst_src; // TODO better source location const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true); @@ -31668,7 +31428,7 @@ fn coerceTupleToArrayPtrs( const zcu = pt.zcu; const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); const ptr_info = ptr_array_ty.ptrInfo(zcu); - const array_ty = Type.fromInterned(ptr_info.child); + const array_ty: Type = .fromInterned(ptr_info.child); const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src); if (ptr_info.flags.alignment != .none) { return sema.fail(block, array_ty_src, "TODO: override the alignment of the array decl we create here", .{}); @@ -31721,14 +31481,14 @@ fn coerceTupleToTuple( const field_index: u32 = @intCast(field_index_usize); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); - const coerced = try sema.coerce(block, Type.fromInterned(field_ty), elem_ref, field_src); + const coerced = try sema.coerce(block, .fromInterned(field_ty), elem_ref, field_src); field_refs[field_index] = coerced; if (default_val != .none) { const init_val = (try sema.resolveValue(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, .{ .simple = .stored_to_comptime_field }); }; - if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), pt.zcu)) { + if (!init_val.eql(Value.fromInterned(default_val), .fromInterned(field_ty), pt.zcu)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } @@ -31885,7 +31645,7 @@ pub fn ensureNavResolved(sema: *Sema, block: *Block, src: LazySrcLoc, nav_index: fn optRefValue(sema: *Sema, opt_val: ?Value) !Value { const pt = sema.pt; - const ptr_anyopaque_ty = try pt.singleConstPtrType(Type.anyopaque); + const ptr_anyopaque_ty = try pt.singleConstPtrType(.anyopaque); return Value.fromInterned(try pt.intern(.{ .opt = .{ .ty = (try pt.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(), .val = if (opt_val) |val| (try pt.getCoerced( @@ -32140,12 +31900,12 @@ fn analyzeSliceLen( const zcu = pt.zcu; if (try sema.resolveValue(slice_inst)) |slice_val| { if (slice_val.isUndef(zcu)) { - return pt.undefRef(Type.usize); + return .undef_usize; } - return pt.intRef(Type.usize, try slice_val.sliceLen(pt)); + return pt.intRef(.usize, try slice_val.sliceLen(pt)); } try sema.requireRuntimeBlock(block, src, null); - return block.addTyOp(.slice_len, Type.usize, slice_inst); + return block.addTyOp(.slice_len, .usize, slice_inst); } fn analyzeIsNull( @@ -32156,7 +31916,7 @@ fn analyzeIsNull( ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; - const result_ty = Type.bool; + const result_ty: Type = .bool; if (try sema.resolveValue(operand)) |opt_val| { if (opt_val.isUndef(zcu)) { return pt.undefRef(result_ty); @@ -32224,7 +31984,7 @@ fn analyzeIsNonErrComptimeOnly( else => {}, } } else if (operand == .undef) { - return pt.undefRef(Type.bool); + return .undef_bool; } else if (@intFromEnum(operand) < InternPool.static_len) { // None of the ref tags can be errors. 
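+ // (Refs below `InternPool.static_len` are the statically-known constants --
+ // well-known types, bools, small integers, `undefined`, `null` -- none of
+ // which is an error value.)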
return .bool_true; @@ -32308,14 +32068,7 @@ fn analyzeIsNonErrComptimeOnly( } if (maybe_operand_val) |err_union| { - if (err_union.isUndef(zcu)) { - return pt.undefRef(Type.bool); - } - if (err_union.getErrorName(zcu) == .none) { - return .bool_true; - } else { - return .bool_false; - } + return if (err_union.isUndef(zcu)) .undef_bool else if (err_union.getErrorName(zcu) == .none) .bool_true else .bool_false; } return .none; } @@ -32412,8 +32165,8 @@ fn analyzeSlice( ); const bounds_error_message = "slice of single-item pointer must have bounds [0..0], [0..1], or [1..1]"; - if (try sema.compareScalar(start_value, .neq, end_value, Type.comptime_int)) { - if (try sema.compareScalar(start_value, .neq, Value.zero_comptime_int, Type.comptime_int)) { + if (try sema.compareScalar(start_value, .neq, end_value, .comptime_int)) { + if (try sema.compareScalar(start_value, .neq, Value.zero_comptime_int, .comptime_int)) { const msg = msg: { const msg = try sema.errMsg(start_src, bounds_error_message, .{}); errdefer msg.destroy(sema.gpa); @@ -32429,7 +32182,7 @@ fn analyzeSlice( break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); - } else if (try sema.compareScalar(end_value, .neq, Value.one_comptime_int, Type.comptime_int)) { + } else if (try sema.compareScalar(end_value, .neq, Value.one_comptime_int, .comptime_int)) { const msg = msg: { const msg = try sema.errMsg(end_src, bounds_error_message, .{}); errdefer msg.destroy(sema.gpa); @@ -32447,7 +32200,7 @@ fn analyzeSlice( return sema.failWithOwnedErrorMsg(block, msg); } } else { - if (try sema.compareScalar(end_value, .gt, Value.one_comptime_int, Type.comptime_int)) { + if (try sema.compareScalar(end_value, .gt, Value.one_comptime_int, .comptime_int)) { return sema.fail( block, end_src, @@ -32512,7 +32265,7 @@ fn analyzeSlice( break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src); } else ptr_or_slice; - const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); + const start = try sema.coerce(block, .usize, uncasted_start, start_src); const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src); const new_ptr_ty = sema.typeOf(new_ptr); @@ -32523,20 +32276,20 @@ fn analyzeSlice( var end_is_len = uncasted_end_opt == .none; const end = e: { if (array_ty.zigTypeTag(zcu) == .array) { - const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(zcu)); + const len_val = try pt.intValue(.usize, array_ty.arrayLen(zcu)); if (!end_is_len) { const end = if (by_length) end: { - const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); + const len = try sema.coerce(block, .usize, uncasted_end_opt, end_src); const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false); - break :end try sema.coerce(block, Type.usize, uncasted_end, end_src); - } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); + break :end try sema.coerce(block, .usize, uncasted_end, end_src); + } else try sema.coerce(block, .usize, uncasted_end_opt, end_src); if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { const len_s_val = try pt.intValue( - Type.usize, + .usize, array_ty.arrayLenIncludingSentinel(zcu), ); - if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) { + if (!(try sema.compareAll(end_val, .lte, len_s_val, .usize))) { const sentinel_label: []const u8 = if (array_ty.sentinel(zcu) != null) " +1 (sentinel)" else @@ -32557,7 +32310,7 @@ fn analyzeSlice( // 
end_is_len is only true if we are NOT using the sentinel // length. For sentinel-length, we don't want the type to // contain the sentinel. - if (end_val.eql(len_val, Type.usize, zcu)) { + if (end_val.eql(len_val, .usize, zcu)) { end_is_len = true; } } @@ -32568,10 +32321,10 @@ fn analyzeSlice( } else if (slice_ty.isSlice(zcu)) { if (!end_is_len) { const end = if (by_length) end: { - const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); + const len = try sema.coerce(block, .usize, uncasted_end_opt, end_src); const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false); - break :end try sema.coerce(block, Type.usize, uncasted_end, end_src); - } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); + break :end try sema.coerce(block, .usize, uncasted_end, end_src); + } else try sema.coerce(block, .usize, uncasted_end_opt, end_src); if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveValue(ptr_or_slice)) |slice_val| { if (slice_val.isUndef(zcu)) { @@ -32580,8 +32333,8 @@ fn analyzeSlice( const has_sentinel = slice_ty.sentinel(zcu) != null; const slice_len = try slice_val.sliceLen(pt); const len_plus_sent = slice_len + @intFromBool(has_sentinel); - const slice_len_val_with_sentinel = try pt.intValue(Type.usize, len_plus_sent); - if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { + const slice_len_val_with_sentinel = try pt.intValue(.usize, len_plus_sent); + if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, .usize))) { const sentinel_label: []const u8 = if (has_sentinel) " +1 (sentinel)" else @@ -32602,8 +32355,8 @@ fn analyzeSlice( // If the slice has a sentinel, we consider end_is_len // is only true if it equals the length WITHOUT the // sentinel, so we don't add a sentinel type. 
- const slice_len_val = try pt.intValue(Type.usize, slice_len); - if (end_val.eql(slice_len_val, Type.usize, zcu)) { + const slice_len_val = try pt.intValue(.usize, slice_len); + if (end_val.eql(slice_len_val, .usize, zcu)) { end_is_len = true; } } @@ -32614,10 +32367,10 @@ fn analyzeSlice( } if (!end_is_len) { if (by_length) { - const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); + const len = try sema.coerce(block, .usize, uncasted_end_opt, end_src); const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false); - break :e try sema.coerce(block, Type.usize, uncasted_end, end_src); - } else break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); + break :e try sema.coerce(block, .usize, uncasted_end, end_src); + } else break :e try sema.coerce(block, .usize, uncasted_end_opt, end_src); } return sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src); }; @@ -32645,7 +32398,7 @@ fn analyzeSlice( // requirement: start <= end if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| { - if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) { + if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, .usize))) { return sema.fail( block, start_src, @@ -32715,7 +32468,7 @@ fn analyzeSlice( try sema.addSafetyCheckCall(block, src, ok, .@"panic.startGreaterThanEnd", &.{ start, end }); } const new_len = if (by_length) - try sema.coerce(block, Type.usize, uncasted_end_opt, end_src) + try sema.coerce(block, .usize, uncasted_end_opt, end_src) else try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false); const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); @@ -32753,9 +32506,9 @@ fn analyzeSlice( bounds_check: { const actual_len = if (array_ty.zigTypeTag(zcu) == .array) - try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu)) + try pt.intRef(.usize, array_ty.arrayLenIncludingSentinel(zcu)) else if (slice_ty.isSlice(zcu)) l: { - const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); + const slice_len_inst = try block.addTyOp(.slice_len, .usize, ptr_or_slice); break :l if (slice_ty.sentinel(zcu) == null) slice_len_inst else @@ -32811,15 +32564,15 @@ fn analyzeSlice( // requirement: end <= len const opt_len_inst = if (array_ty.zigTypeTag(zcu) == .array) - try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu)) + try pt.intRef(.usize, array_ty.arrayLenIncludingSentinel(zcu)) else if (slice_ty.isSlice(zcu)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the // underlying value data includes the sentinel - break :blk try pt.intRef(Type.usize, try slice_val.sliceLen(pt)); + break :blk try pt.intRef(.usize, try slice_val.sliceLen(pt)); } - const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); + const slice_len_inst = try block.addTyOp(.slice_len, .usize, ptr_or_slice); if (slice_ty.sentinel(zcu) == null) break :blk slice_len_inst; // we have to add one because slice lengths don't include the sentinel @@ -32935,8 +32688,8 @@ fn cmpNumeric( } // Any other comparison depends on both values, so the result is undef if either is undef. 
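The switch described just above decides the peer size and pointee; its user-visible effect is ordinary peer type resolution, sketched here via `@TypeOf` (only documented language behavior is assumed):

    const std = @import("std");

    test "peer resolution of array-like pointers" {
        var buf: [4]u8 = .{ 1, 2, 3, 4 };
        const single: *[4]u8 = &buf;
        const many: [*]u8 = &buf;
        const slice: []u8 = &buf;
        // *[n]T + [*]T -> [*]T, and *[n]T + []T -> []T, matching the cases below.
        try std.testing.expect(@TypeOf(single, many) == [*]u8);
        try std.testing.expect(@TypeOf(single, slice) == []u8);
    }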
- if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool); - if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool); + if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return .undef_bool; + if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return .undef_bool; const runtime_src: LazySrcLoc = if (maybe_lhs_val) |lhs_val| rs: { if (maybe_rhs_val) |rhs_val| { @@ -33646,7 +33399,7 @@ fn resolvePeerTypes( candidate_srcs: PeerTypeCandidateSrc, ) !Type { switch (instructions.len) { - 0 => return Type.noreturn, + 0 => return .noreturn, 1 => return sema.typeOf(instructions[0]), else => {}, } @@ -33780,12 +33533,12 @@ fn resolvePeerTypesInner( .nullable => { for (peer_tys, 0..) |opt_ty, i| { const ty = opt_ty orelse continue; - if (!ty.eql(Type.null, zcu)) return .{ .conflict = .{ + if (!ty.eql(.null, zcu)) return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; } - return .{ .success = Type.null }; + return .{ .success = .null }; }, .optional => { @@ -34006,7 +33759,7 @@ fn resolvePeerTypesInner( }; // Try peer -> cur, then cur -> peer - ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) orelse { + ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) orelse { return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, @@ -34153,8 +33906,8 @@ fn resolvePeerTypesInner( }; // We abstract array handling slightly so that tuple pointers can work like array pointers - const peer_pointee_array = sema.typeIsArrayLike(Type.fromInterned(peer_info.child)); - const cur_pointee_array = sema.typeIsArrayLike(Type.fromInterned(ptr_info.child)); + const peer_pointee_array = sema.typeIsArrayLike(.fromInterned(peer_info.child)); + const cur_pointee_array = sema.typeIsArrayLike(.fromInterned(ptr_info.child)); // This switch is just responsible for deciding the size and pointee (not including // single-pointer array sentinel). 
@@ -34162,7 +33915,7 @@ fn resolvePeerTypesInner( switch (peer_info.flags.size) { .one => switch (ptr_info.flags.size) { .one => { - if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| { + if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } @@ -34204,7 +33957,7 @@ fn resolvePeerTypesInner( .many => { // Only works for *[n]T + [*]T -> [*]T const arr = peer_pointee_array orelse return generic_err; - if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| { + if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), arr.elem_ty)) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } @@ -34217,7 +33970,7 @@ fn resolvePeerTypesInner( .slice => { // Only works for *[n]T + []T -> []T const arr = peer_pointee_array orelse return generic_err; - if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| { + if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), arr.elem_ty)) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } @@ -34233,7 +33986,7 @@ fn resolvePeerTypesInner( .one => { // Only works for [*]T + *[n]T -> [*]T const arr = cur_pointee_array orelse return generic_err; - if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| { + if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, .fromInterned(peer_info.child))) |pointee| { ptr_info.flags.size = .many; ptr_info.child = pointee.toIntern(); break :good; @@ -34247,7 +34000,7 @@ fn resolvePeerTypesInner( return generic_err; }, .many => { - if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| { + if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } @@ -34262,7 +34015,7 @@ fn resolvePeerTypesInner( } }; } // Okay, then works for [*]T + "[]T" -> [*]T - if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| { + if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| { ptr_info.flags.size = .many; ptr_info.child = pointee.toIntern(); break :good; @@ -34275,7 +34028,7 @@ fn resolvePeerTypesInner( .one => { // Only works for []T + *[n]T -> []T const arr = cur_pointee_array orelse return generic_err; - if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| { + if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, .fromInterned(peer_info.child))) |pointee| { ptr_info.flags.size = .slice; ptr_info.child = pointee.toIntern(); break :good; @@ -34293,7 +34046,7 @@ fn resolvePeerTypesInner( return generic_err; }, .slice => { - if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| { + if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } @@ -34479,7 
+34232,7 @@ fn resolvePeerTypesInner( } }, } } - return .{ .success = Type.comptime_int }; + return .{ .success = .comptime_int }; }, .comptime_float => { @@ -34493,7 +34246,7 @@ fn resolvePeerTypesInner( } }, } } - return .{ .success = Type.comptime_float }; + return .{ .success = .comptime_float }; }, .fixed_int => { @@ -34601,11 +34354,11 @@ fn resolvePeerTypesInner( // Recreate the type so we eliminate any c_longdouble const bits = @max(cur_ty.floatBits(target), ty.floatBits(target)); opt_cur_ty = switch (bits) { - 16 => Type.f16, - 32 => Type.f32, - 64 => Type.f64, - 80 => Type.f80, - 128 => Type.f128, + 16 => .f16, + 32 => .f32, + 64 => .f64, + 80 => .f80, + 128 => .f128, else => unreachable, }; } else { @@ -34716,7 +34469,7 @@ fn resolvePeerTypesInner( break; }; const uncoerced_field = Air.internedToRef(uncoerced_field_val.toIntern()); - const coerced_inst = sema.coerceExtra(block, Type.fromInterned(field_ty.*), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) { + const coerced_inst = sema.coerceExtra(block, .fromInterned(field_ty.*), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) { // It's possible for PTR to give false positives. Just give up on making this a comptime field, we'll get an error later anyway error.NotCoercible => { comptime_val = null; @@ -34729,7 +34482,7 @@ fn resolvePeerTypesInner( comptime_val = coerced_val; continue; }; - if (!coerced_val.eql(existing, Type.fromInterned(field_ty.*), zcu)) { + if (!coerced_val.eql(existing, .fromInterned(field_ty.*), zcu)) { comptime_val = null; break; } @@ -34743,7 +34496,7 @@ fn resolvePeerTypesInner( .values = field_vals, }); - return .{ .success = Type.fromInterned(final_ty) }; + return .{ .success = .fromInterned(final_ty) }; }, .exact => { @@ -34813,7 +34566,7 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { const field_count = ty.structFieldCount(zcu); if (field_count == 0) return .{ .len = 0, - .elem_ty = Type.noreturn, + .elem_ty = .noreturn, }; if (!ty.isTuple(zcu)) return null; const elem_ty = ty.fieldType(0, zcu); @@ -34902,7 +34655,7 @@ pub fn resolveStructAlignment( var alignment: Alignment = .@"1"; for (0..struct_type.field_types.len) |i| { - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) continue; const field_align = try field_ty.structFieldAlignmentSema( @@ -34953,7 +34706,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { var big_align: Alignment = .@"1"; for (aligns, sizes, 0..) |*field_align, *field_size, i| { - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) { struct_type.offsets.get(ip)[i] = 0; field_size.* = 0; @@ -35001,7 +34754,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { const runtime_order = struct_type.runtime_order.get(ip); for (runtime_order, 0..) 
|*ro, i| { - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) { ro.* = .omitted; } else { @@ -35095,7 +34848,7 @@ fn backingIntType( const fields_bit_sum = blk: { var accumulator: u64 = 0; for (0..struct_type.field_types.len) |i| { - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]); accumulator += try field_ty.bitSizeSema(pt); } break :blk accumulator; @@ -35234,7 +34987,7 @@ pub fn resolveUnionAlignment( var max_align: Alignment = .@"1"; for (0..union_type.field_types.len) |field_index| { - const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(union_type.field_types.get(ip)[field_index]); if (!(try field_ty.hasRuntimeBitsSema(pt))) continue; const explicit_align = union_type.fieldAlign(ip, field_index); @@ -35282,7 +35035,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { var max_size: u64 = 0; var max_align: Alignment = .@"1"; for (0..union_type.field_types.len) |field_index| { - const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(union_type.field_types.get(ip)[field_index]); if (try field_ty.comptimeOnlySema(pt) or field_ty.zigTypeTag(pt.zcu) == .noreturn) continue; // TODO: should this affect alignment? @@ -35307,7 +35060,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { const has_runtime_tag = union_type.flagsUnordered(ip).runtime_tag.hasTag() and try Type.fromInterned(union_type.enum_tag_ty).hasRuntimeBitsSema(pt); const size, const alignment, const padding = if (has_runtime_tag) layout: { - const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty); + const enum_tag_type: Type = .fromInterned(union_type.enum_tag_ty); const tag_align = try enum_tag_type.abiAlignmentSema(pt); const tag_size = try enum_tag_type.abiSizeSema(pt); @@ -35392,7 +35145,7 @@ pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { // See also similar code for unions. for (0..struct_type.field_types.len) |i| { - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]); try field_ty.resolveFully(pt); } } @@ -35421,7 +35174,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { union_obj.setStatus(ip, .fully_resolved_wip); for (0..union_obj.field_types.len) |field_index| { - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); + const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]); try field_ty.resolveFully(pt); } union_obj.setStatus(ip, .fully_resolved); @@ -35553,7 +35306,7 @@ fn resolveInferredErrorSet( // set. However, in the case of comptime/inline function calls with // inferred error sets, each call gets an ad-hoc InferredErrorSet object, which // has no corresponding function body. - const ies_func_info = zcu.typeToFunc(Type.fromInterned(func.ty)).?; + const ies_func_info = zcu.typeToFunc(.fromInterned(func.ty)).?; // if the ies is declared by an inline function with a generic return type, the return_type should be generic_poison, // because an inline function does not create a new declaration and the ies has already been filled in by analyzeCall, // so we can simply skip this case. 
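The comment at the end of this hunk describes inferred error sets of inline calls; a source-level sketch of that case (hypothetical function name):

    const std = @import("std");

    // The !u32 return type gives `maybeFail` an inferred error set. Because the
    // function is inline, each call site gets its own ad-hoc inferred set and
    // there is no separate function body for Sema to analyze.
    inline fn maybeFail(x: u32) !u32 {
        return if (x == 0) error.Zero else x;
    }

    test "inline call with an inferred error set" {
        try std.testing.expectError(error.Zero, maybeFail(0));
        try std.testing.expectEqual(@as(u32, 7), try maybeFail(7));
    }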
@@ -36008,7 +35761,7 @@ fn structFieldInits( // In init bodies, the zir index of the struct itself is used // to refer to the current field type. - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_i]); const type_ref = Air.internedToRef(field_ty.toIntern()); try sema.inst_map.ensureSpaceForInstructions(sema.gpa, &.{zir_index}); sema.inst_map.putAssumeCapacity(zir_index, type_ref); @@ -36135,7 +35888,7 @@ fn unionFields( } if (fields_len > 0) { - const field_count_val = try pt.intValue(Type.comptime_int, fields_len - 1); + const field_count_val = try pt.intValue(.comptime_int, fields_len - 1); if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) { const msg = msg: { const msg = try sema.errMsg(tag_ty_src, "specified integer tag type cannot represent every field", .{}); @@ -36288,9 +36041,9 @@ fn unionFields( } const field_ty: Type = if (!has_type) - Type.void + .void else if (field_type_ref == .none) - Type.noreturn + .noreturn else try sema.resolveType(&block_scope, type_src, field_type_ref); @@ -36388,11 +36141,11 @@ fn unionFields( for (tag_info.names.get(ip), 0..) |field_name, field_index| { if (explicit_tags_seen[field_index]) continue; - try sema.addFieldErrNote(Type.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{ + try sema.addFieldErrNote(.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{ field_name.fmt(ip), }); } - try sema.addDeclaredHereNote(msg, Type.fromInterned(tag_ty)); + try sema.addDeclaredHereNote(msg, .fromInterned(tag_ty)); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); @@ -36530,10 +36283,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .comptime_int_type, .comptime_float_type, .enum_literal_type, + .ptr_usize_type, + .ptr_const_comptime_int_type, .manyptr_u8_type, .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, - .single_const_pointer_to_comptime_int_type, .slice_const_u8_type, .slice_const_u8_sentinel_0_type, .vector_8_i8_type, @@ -36595,11 +36349,16 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .empty_tuple_type => Value.empty_tuple, // values, not types .undef, + .undef_bool, + .undef_usize, + .undef_u1, .zero, .zero_usize, + .zero_u1, .zero_u8, .one, .one_usize, + .one_u1, .one_u8, .four_u8, .negative_one, @@ -36705,7 +36464,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .storage = .{ .elems = &.{} }, } })); - if (try sema.typeHasOnePossibleValue(Type.fromInterned(seq_type.child))) |opv| { + if (try sema.typeHasOnePossibleValue(.fromInterned(seq_type.child))) |opv| { return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .repeated_elem = opv.toIntern() }, @@ -36740,7 +36499,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { field_val.* = struct_type.field_inits.get(ip)[i]; continue; } - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]); if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| { field_val.* = field_opv.toIntern(); } else return null; @@ -36773,13 +36532,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { try ty.resolveLayout(pt); const union_obj = ip.loadUnionType(ty.toIntern()); - const tag_val = (try 
sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse + const tag_val = (try sema.typeHasOnePossibleValue(.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse return null; if (union_obj.field_types.len == 0) { const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() }); return Value.fromInterned(only); } - const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); + const only_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[0]); const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse return null; const only = try pt.internUnion(.{ @@ -36796,7 +36555,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .nonexhaustive => { if (enum_type.tag_ty == .comptime_int_type) return null; - if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| { + if (try sema.typeHasOnePossibleValue(.fromInterned(enum_type.tag_ty))) |int_opv| { const only = try pt.intern(.{ .enum_tag = .{ .ty = ty.toIntern(), .int = int_opv.toIntern(), @@ -36814,7 +36573,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { 1 => try pt.intern(.{ .enum_tag = .{ .ty = ty.toIntern(), .int = if (enum_type.values.len == 0) - (try pt.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern() + (try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern() else try ip.getCoercedInts( zcu.gpa, @@ -37041,7 +36800,7 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { if (ptr_type.flags.is_allowzero) return null; // optionals of zero sized types behave like bools, not pointers - const payload_ty = Type.fromInterned(opt_child); + const payload_ty: Type = .fromInterned(opt_child); if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) { return null; } @@ -37175,7 +36934,7 @@ fn intFromFloatScalar( var big_int = try float128IntPartToBigInt(sema.arena, float); defer big_int.deinit(); - const cti_result = try pt.intValue_big(Type.comptime_int, big_int.toConst()); + const cti_result = try pt.intValue_big(.comptime_int, big_int.toConst()); if (!(try sema.intFitsInType(cti_result, int_ty, null))) { return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{ @@ -37278,8 +37037,8 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { assert(enum_type.tag_mode != .nonexhaustive); // The `tagValueIndex` function call below relies on the type being the integer tag type. // `getCoerced` assumes the value will fit the new type. 
- if (!(try sema.intFitsInType(int, Type.fromInterned(enum_type.tag_ty), null))) return false; - const int_coerced = try pt.getCoerced(int, Type.fromInterned(enum_type.tag_ty)); + if (!(try sema.intFitsInType(int, .fromInterned(enum_type.tag_ty), null))) return false; + const int_coerced = try pt.getCoerced(int, .fromInterned(enum_type.tag_ty)); return enum_type.tagValueIndex(&zcu.intern_pool, int_coerced.toIntern()) != null; } @@ -37359,7 +37118,7 @@ fn compareVector( const lhs_elem = try lhs.elemValue(pt, i); const rhs_elem = try rhs.elemValue(pt, i); if (lhs_elem.isUndef(zcu) or rhs_elem.isUndef(zcu)) { - scalar.* = try pt.intern(.{ .undef = .bool_type }); + scalar.* = .undef_bool; } else { const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(zcu)); scalar.* = Value.makeBool(res_bool).toIntern(); @@ -37826,7 +37585,7 @@ pub fn resolveDeclaredEnum( .owner = .wrap(.{ .type = wip_ty.index }), .func_index = .none, .func_is_naked = false, - .fn_ret_ty = Type.void, + .fn_ret_ty = .void, .fn_ret_ty_ies = null, .comptime_err_ret_trace = &comptime_err_ret_trace, }; @@ -37999,7 +37758,7 @@ fn resolveDeclaredEnumInner( break :overflow false; } else overflow: { assert(wip_ty.nextField(ip, field_name, .none) == null); - last_tag_val = try pt.intValue(Type.comptime_int, field_i); + last_tag_val = try pt.intValue(.comptime_int, field_i); if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); break :overflow false; @@ -38222,8 +37981,7 @@ fn getExpectedBuiltinFnType(sema: *Sema, decl: Zcu.BuiltinDecl) CompileError!Typ .@"panic.castToNull", .@"panic.incorrectAlignment", .@"panic.invalidErrorCode", - .@"panic.castTruncatedData", - .@"panic.negativeToUnsigned", + .@"panic.integerOutOfBounds", .@"panic.integerOverflow", .@"panic.shlOverflow", .@"panic.shrOverflow", diff --git a/src/Sema/arith.zig b/src/Sema/arith.zig index ba94220cb4..85abd351d8 100644 --- a/src/Sema/arith.zig +++ b/src/Sema/arith.zig @@ -168,7 +168,7 @@ fn addWithOverflowScalar( else => unreachable, } if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{ - .overflow_bit = try pt.undefValue(.u1), + .overflow_bit = .undef_u1, .wrapped_result = try pt.undefValue(ty), }; return intAddWithOverflow(sema, lhs, rhs, ty); @@ -229,7 +229,7 @@ fn subWithOverflowScalar( else => unreachable, } if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{ - .overflow_bit = try pt.undefValue(.u1), + .overflow_bit = .undef_u1, .wrapped_result = try pt.undefValue(ty), }; return intSubWithOverflow(sema, lhs, rhs, ty); @@ -290,7 +290,7 @@ fn mulWithOverflowScalar( else => unreachable, } if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{ - .overflow_bit = try pt.undefValue(.u1), + .overflow_bit = .undef_u1, .wrapped_result = try pt.undefValue(ty), }; return intMulWithOverflow(sema, lhs, rhs, ty); @@ -1043,7 +1043,7 @@ fn comptimeIntAdd(sema: *Sema, lhs: Value, rhs: Value) !Value { fn intAddWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult { switch (ty.toIntern()) { .comptime_int_type => return .{ - .overflow_bit = try sema.pt.intValue(.u1, 0), + .overflow_bit = .zero_u1, .wrapped_result = try comptimeIntAdd(sema, lhs, rhs), }, else => return intAddWithOverflowInner(sema, lhs, rhs, ty), @@ -1125,7 +1125,7 @@ fn comptimeIntSub(sema: *Sema, lhs: Value, rhs: Value) !Value { fn intSubWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult { switch (ty.toIntern()) { 
.comptime_int_type => return .{ - .overflow_bit = try sema.pt.intValue(.u1, 0), + .overflow_bit = .zero_u1, .wrapped_result = try comptimeIntSub(sema, lhs, rhs), }, else => return intSubWithOverflowInner(sema, lhs, rhs, ty), @@ -1211,7 +1211,7 @@ fn comptimeIntMul(sema: *Sema, lhs: Value, rhs: Value) !Value { fn intMulWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult { switch (ty.toIntern()) { .comptime_int_type => return .{ - .overflow_bit = try sema.pt.intValue(.u1, 0), + .overflow_bit = .zero_u1, .wrapped_result = try comptimeIntMul(sema, lhs, rhs), }, else => return intMulWithOverflowInner(sema, lhs, rhs, ty), diff --git a/src/Type.zig b/src/Type.zig index f957a41808..a1b64a17fa 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -2641,10 +2641,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value { if (enum_type.values.len == 0) { const only = try pt.intern(.{ .enum_tag = .{ .ty = ty.toIntern(), - .int = try pt.intern(.{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = 0 }, - } }), + .int = (try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern(), } }); return Value.fromInterned(only); } else { @@ -3676,10 +3673,11 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void { .null_type, .undefined_type, .enum_literal_type, + .ptr_usize_type, + .ptr_const_comptime_int_type, .manyptr_u8_type, .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, - .single_const_pointer_to_comptime_int_type, .slice_const_u8_type, .slice_const_u8_sentinel_0_type, .optional_noreturn_type, @@ -3691,9 +3689,11 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void { .undef => unreachable, .zero => unreachable, .zero_usize => unreachable, + .zero_u1 => unreachable, .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, + .one_u1 => unreachable, .one_u8 => unreachable, .four_u8 => unreachable, .negative_one => unreachable, @@ -4100,10 +4100,11 @@ pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type }; pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; +pub const ptr_usize: Type = .{ .ip_index = .ptr_usize_type }; +pub const ptr_const_comptime_int: Type = .{ .ip_index = .ptr_const_comptime_int_type }; pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; pub const manyptr_const_u8: Type = .{ .ip_index = .manyptr_const_u8_type }; pub const manyptr_const_u8_sentinel_0: Type = .{ .ip_index = .manyptr_const_u8_sentinel_0_type }; -pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_const_pointer_to_comptime_int_type }; pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; diff --git a/src/Value.zig b/src/Value.zig index 55b047f7ca..b757759701 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -2895,19 +2895,25 @@ pub fn intValueBounds(val: Value, pt: Zcu.PerThread) !?[2]Value { pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; -pub const zero_usize: Value = .{ .ip_index = .zero_usize }; -pub const zero_u8: Value = .{ .ip_index = .zero_u8 }; -pub const zero_comptime_int: Value = .{ .ip_index = .zero }; -pub const one_comptime_int: Value = .{ .ip_index = .one }; -pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one }; pub const undef: Value = .{ .ip_index = .undef }; +pub const undef_bool: Value = .{ .ip_index = 
.undef_bool }; +pub const undef_usize: Value = .{ .ip_index = .undef_usize }; +pub const undef_u1: Value = .{ .ip_index = .undef_u1 }; +pub const zero_comptime_int: Value = .{ .ip_index = .zero }; +pub const zero_usize: Value = .{ .ip_index = .zero_usize }; +pub const zero_u1: Value = .{ .ip_index = .zero_u1 }; +pub const zero_u8: Value = .{ .ip_index = .zero_u8 }; +pub const one_comptime_int: Value = .{ .ip_index = .one }; +pub const one_usize: Value = .{ .ip_index = .one_usize }; +pub const one_u1: Value = .{ .ip_index = .one_u1 }; +pub const one_u8: Value = .{ .ip_index = .one_u8 }; +pub const four_u8: Value = .{ .ip_index = .four_u8 }; +pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one }; pub const @"void": Value = .{ .ip_index = .void_value }; -pub const @"null": Value = .{ .ip_index = .null_value }; -pub const @"false": Value = .{ .ip_index = .bool_false }; -pub const @"true": Value = .{ .ip_index = .bool_true }; pub const @"unreachable": Value = .{ .ip_index = .unreachable_value }; - -pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type }; +pub const @"null": Value = .{ .ip_index = .null_value }; +pub const @"true": Value = .{ .ip_index = .bool_true }; +pub const @"false": Value = .{ .ip_index = .bool_false }; pub const empty_tuple: Value = .{ .ip_index = .empty_tuple }; pub fn makeBool(x: bool) Value { diff --git a/src/Zcu.zig b/src/Zcu.zig index c49a1d46b1..20fafb6c4d 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -441,8 +441,7 @@ pub const BuiltinDecl = enum { @"panic.castToNull", @"panic.incorrectAlignment", @"panic.invalidErrorCode", - @"panic.castTruncatedData", - @"panic.negativeToUnsigned", + @"panic.integerOutOfBounds", @"panic.integerOverflow", @"panic.shlOverflow", @"panic.shrOverflow", @@ -518,8 +517,7 @@ pub const BuiltinDecl = enum { .@"panic.castToNull", .@"panic.incorrectAlignment", .@"panic.invalidErrorCode", - .@"panic.castTruncatedData", - .@"panic.negativeToUnsigned", + .@"panic.integerOutOfBounds", .@"panic.integerOverflow", .@"panic.shlOverflow", .@"panic.shrOverflow", @@ -585,8 +583,7 @@ pub const SimplePanicId = enum { cast_to_null, incorrect_alignment, invalid_error_code, - cast_truncated_data, - negative_to_unsigned, + integer_out_of_bounds, integer_overflow, shl_overflow, shr_overflow, @@ -609,8 +606,7 @@ pub const SimplePanicId = enum { .cast_to_null => .@"panic.castToNull", .incorrect_alignment => .@"panic.incorrectAlignment", .invalid_error_code => .@"panic.invalidErrorCode", - .cast_truncated_data => .@"panic.castTruncatedData", - .negative_to_unsigned => .@"panic.negativeToUnsigned", + .integer_out_of_bounds => .@"panic.integerOutOfBounds", .integer_overflow => .@"panic.integerOverflow", .shl_overflow => .@"panic.shlOverflow", .shr_overflow => .@"panic.shrOverflow", @@ -3829,26 +3825,8 @@ pub const Feature = enum { is_named_enum_value, error_set_has_value, field_reordering, - /// When this feature is supported, the backend supports the following AIR instructions: - /// * `Air.Inst.Tag.add_safe` - /// * `Air.Inst.Tag.sub_safe` - /// * `Air.Inst.Tag.mul_safe` - /// * `Air.Inst.Tag.intcast_safe` - /// The motivation for this feature is that it makes AIR smaller, and makes it easier - /// to generate better machine code in the backends. All backends should migrate to - /// enabling this feature. - safety_checked_instructions, /// If the backend supports running from another thread. 
separate_thread, - /// If the backend supports the following AIR instructions with vector types: - /// * `Air.Inst.Tag.bit_and` - /// * `Air.Inst.Tag.bit_or` - /// * `Air.Inst.Tag.bitcast` - /// * `Air.Inst.Tag.float_from_int` - /// * `Air.Inst.Tag.fptrunc` - /// * `Air.Inst.Tag.int_from_float` - /// If not supported, Sema will scalarize the operation. - all_vector_instructions, }; pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 44abb3cbf3..8e3d07627f 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -1741,10 +1741,11 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: *A return; } - const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm); - try air.legalize(backend, zcu); + legalize: { + try air.legalize(pt, @import("../codegen.zig").legalizeFeatures(pt, nav_index) orelse break :legalize); + } - var liveness = try Air.Liveness.analyze(gpa, air.*, ip); + var liveness = try Air.Liveness.analyze(zcu, air.*, ip); defer liveness.deinit(gpa); if (build_options.enable_debug_extensions and comp.verbose_air) { @@ -1756,6 +1757,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: *A if (std.debug.runtime_safety) { var verify: Air.Liveness.Verify = .{ .gpa = gpa, + .zcu = zcu, .air = air.*, .liveness = liveness, .intern_pool = ip, @@ -3022,7 +3024,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE // is unused so it just has to be a no-op. sema.air_instructions.set(@intFromEnum(ptr_inst), .{ .tag = .alloc, - .data = .{ .ty = Type.single_const_pointer_to_comptime_int }, + .data = .{ .ty = .ptr_const_comptime_int }, }); } @@ -3843,6 +3845,21 @@ pub fn nullValue(pt: Zcu.PerThread, opt_ty: Type) Allocator.Error!Value { } })); } +/// `ty` is an integer or a vector of integers. 
+pub fn overflowArithmeticTupleType(pt: Zcu.PerThread, ty: Type) !Type { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const ov_ty: Type = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{ + .len = ty.vectorLen(zcu), + .child = .u1_type, + }) else .u1; + const tuple_ty = try ip.getTupleType(zcu.gpa, pt.tid, .{ + .types = &.{ ty.toIntern(), ov_ty.toIntern() }, + .values = &.{ .none, .none }, + }); + return .fromInterned(tuple_ty); +} + pub fn smallestUnsignedInt(pt: Zcu.PerThread, max: u64) Allocator.Error!Type { return pt.intType(.unsigned, Type.smallestUnsignedBits(max)); } diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 6fd23cfd18..c01fa24ecc 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -40,6 +40,10 @@ const gp = abi.RegisterClass.gp; const InnerError = CodeGenError || error{OutOfRegisters}; +pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features { + return null; +} + gpa: Allocator, pt: Zcu.PerThread, air: Air, @@ -774,7 +778,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .error_name => try self.airErrorName(inst), .splat => try self.airSplat(inst), .select => try self.airSelect(inst), - .shuffle => try self.airShuffle(inst), + .shuffle_one => try self.airShuffleOne(inst), + .shuffle_two => try self.airShuffleTwo(inst), .reduce => try self.airReduce(inst), .aggregate_init => try self.airAggregateInit(inst), .union_init => try self.airUnionInit(inst), @@ -2261,12 +2266,13 @@ fn shiftExact( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - _ = rhs_ty; - const pt = self.pt; const zcu = pt.zcu; switch (lhs_ty.zigTypeTag(zcu)) { - .vector => return self.fail("TODO binary operations on vectors", .{}), + .vector => if (!rhs_ty.isVector(zcu)) + return self.fail("TODO vector shift with scalar rhs", .{}) + else + return self.fail("TODO binary operations on vectors", .{}), .int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { @@ -2317,7 +2323,10 @@ fn shiftNormal( const pt = self.pt; const zcu = pt.zcu; switch (lhs_ty.zigTypeTag(zcu)) { - .vector => return self.fail("TODO binary operations on vectors", .{}), + .vector => if (!rhs_ty.isVector(zcu)) + return self.fail("TODO vector shift with scalar rhs", .{}) + else + return self.fail("TODO binary operations on vectors", .{}), .int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { @@ -2874,7 +2883,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void { const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu))); switch (lhs_ty.zigTypeTag(zcu)) { - .vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), + .vector => if (!rhs_ty.isVector(zcu)) + return self.fail("TODO implement vector shl_with_overflow with scalar rhs", .{}) + else + return self.fail("TODO implement shl_with_overflow for vectors", .{}), .int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { @@ -2993,8 +3005,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void { } fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!void { + const zcu = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) + .dead + else if 
(self.typeOf(bin_op.lhs).isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu)) + return self.fail("TODO implement vector shl_sat with scalar rhs for {}", .{self.target.cpu.arch}) + else + return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -6032,11 +6050,14 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) InnerError!void { return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs }); } -fn airShuffle(self: *Self, inst: Air.Inst.Index) InnerError!void { - const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ extra.a, extra.b, .none }); +fn airShuffleOne(self: *Self, inst: Air.Inst.Index) InnerError!void { + _ = inst; + return self.fail("TODO implement airShuffleOne for {}", .{self.target.cpu.arch}); +} + +fn airShuffleTwo(self: *Self, inst: Air.Inst.Index) InnerError!void { + _ = inst; + return self.fail("TODO implement airShuffleTwo for {}", .{self.target.cpu.arch}); } fn airReduce(self: *Self, inst: Air.Inst.Index) InnerError!void { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index e9d0e91db1..d687c74c15 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -41,6 +41,10 @@ const gp = abi.RegisterClass.gp; const InnerError = CodeGenError || error{OutOfRegisters}; +pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features { + return null; +} + gpa: Allocator, pt: Zcu.PerThread, air: Air, @@ -763,7 +767,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .error_name => try self.airErrorName(inst), .splat => try self.airSplat(inst), .select => try self.airSelect(inst), - .shuffle => try self.airShuffle(inst), + .shuffle_one => try self.airShuffleOne(inst), + .shuffle_two => try self.airShuffleTwo(inst), .reduce => try self.airReduce(inst), .aggregate_init => try self.airAggregateInit(inst), .union_init => try self.airUnionInit(inst), @@ -1857,7 +1862,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu)); switch (lhs_ty.zigTypeTag(zcu)) { - .vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), + .vector => if (!rhs_ty.isVector(zcu)) + return self.fail("TODO implement vector shl_with_overflow with scalar rhs", .{}) + else + return self.fail("TODO implement shl_with_overflow for vectors", .{}), .int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 32) { @@ -1978,8 +1986,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { + const zcu = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) + .dead + else if (self.typeOf(bin_op.lhs).isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu)) + return self.fail("TODO implement vector shl_sat with scalar rhs for {}", .{self.target.cpu.arch}) + else + return self.fail("TODO implement shl_sat for {}", 
.{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -3788,7 +3802,10 @@ fn shiftExact( const pt = self.pt; const zcu = pt.zcu; switch (lhs_ty.zigTypeTag(zcu)) { - .vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .vector => if (!rhs_ty.isVector(zcu)) + return self.fail("TODO ARM vector shift with scalar rhs", .{}) + else + return self.fail("TODO ARM binary operations on vectors", .{}), .int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 32) { @@ -3828,7 +3845,10 @@ fn shiftNormal( const pt = self.pt; const zcu = pt.zcu; switch (lhs_ty.zigTypeTag(zcu)) { - .vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .vector => if (!rhs_ty.isVector(zcu)) + return self.fail("TODO ARM vector shift with scalar rhs", .{}) + else + return self.fail("TODO ARM binary operations on vectors", .{}), .int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 32) { @@ -6002,10 +6022,14 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs }); } -fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for arm", .{}); - return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airShuffleOne(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airShuffleOne for arm", .{}); +} + +fn airShuffleTwo(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airShuffleTwo for arm", .{}); } fn airReduce(self: *Self, inst: Air.Inst.Index) !void { diff --git a/src/arch/powerpc/CodeGen.zig b/src/arch/powerpc/CodeGen.zig index 6334b65ff8..0cfee67ebd 100644 --- a/src/arch/powerpc/CodeGen.zig +++ b/src/arch/powerpc/CodeGen.zig @@ -10,6 +10,10 @@ const Zcu = @import("../../Zcu.zig"); const assert = std.debug.assert; const log = std.log.scoped(.codegen); +pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features { + return null; +} + pub fn generate( bin_file: *link.File, pt: Zcu.PerThread, diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index edba985beb..1d17d34189 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -51,6 +51,15 @@ const Instruction = encoding.Instruction; const InnerError = CodeGenError || error{OutOfRegisters}; +pub fn legalizeFeatures(_: *const std.Target) *const Air.Legalize.Features { + return comptime &.initMany(&.{ + .expand_intcast_safe, + .expand_add_safe, + .expand_sub_safe, + .expand_mul_safe, + }); +} + pt: Zcu.PerThread, air: Air, liveness: Air.Liveness, @@ -1577,7 +1586,8 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .error_name => try func.airErrorName(inst), .splat => try func.airSplat(inst), .select => try func.airSelect(inst), - .shuffle => try func.airShuffle(inst), + .shuffle_one => try func.airShuffleOne(inst), + .shuffle_two => try func.airShuffleTwo(inst), .reduce => try func.airReduce(inst), .aggregate_init => try func.airAggregateInit(inst), .union_init => try func.airUnionInit(inst), @@ -2764,6 +2774,7 @@ fn genBinOp( .shl, .shl_exact, => { + if (lhs_ty.isVector(zcu) and !rhs_ty.isVector(zcu)) return func.fail("TODO: vector shift with scalar rhs", .{}); if (bit_size > 64) return func.fail("TODO: genBinOp 
shift > 64 bits, {}", .{bit_size}); try func.truncateRegister(rhs_ty, rhs_reg); @@ -3248,8 +3259,14 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void { } fn airShlWithOverflow(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShlWithOverflow", .{}); + const result: MCValue = if (func.liveness.isUnused(inst)) + .unreach + else if (func.typeOf(bin_op.lhs).isVector(zcu) and !func.typeOf(bin_op.rhs).isVector(zcu)) + return func.fail("TODO implement vector airShlWithOverflow with scalar rhs", .{}) + else + return func.fail("TODO implement airShlWithOverflow", .{}); return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -3266,8 +3283,14 @@ fn airMulSat(func: *Func, inst: Air.Inst.Index) !void { } fn airShlSat(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.pt.zcu; const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShlSat", .{}); + const result: MCValue = if (func.liveness.isUnused(inst)) + .unreach + else if (func.typeOf(bin_op.lhs).isVector(zcu) and !func.typeOf(bin_op.rhs).isVector(zcu)) + return func.fail("TODO implement vector airShlSat with scalar rhs", .{}) + else + return func.fail("TODO implement airShlSat", .{}); return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -8008,10 +8031,14 @@ fn airSelect(func: *Func, inst: Air.Inst.Index) !void { return func.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs }); } -fn airShuffle(func: *Func, inst: Air.Inst.Index) !void { - const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShuffle for riscv64", .{}); - return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +fn airShuffleOne(func: *Func, inst: Air.Inst.Index) !void { + _ = inst; + return func.fail("TODO implement airShuffleOne for riscv64", .{}); +} + +fn airShuffleTwo(func: *Func, inst: Air.Inst.Index) !void { + _ = inst; + return func.fail("TODO implement airShuffleTwo for riscv64", .{}); } fn airReduce(func: *Func, inst: Air.Inst.Index) !void { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 4235de94f5..439e5e6dbb 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -41,6 +41,10 @@ const Self = @This(); const InnerError = CodeGenError || error{OutOfRegisters}; +pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features { + return null; +} + const RegisterView = enum(u1) { caller, callee, @@ -617,7 +621,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .error_name => try self.airErrorName(inst), .splat => try self.airSplat(inst), .select => @panic("TODO try self.airSelect(inst)"), - .shuffle => @panic("TODO try self.airShuffle(inst)"), + .shuffle_one => @panic("TODO try self.airShuffleOne(inst)"), + .shuffle_two => @panic("TODO try self.airShuffleTwo(inst)"), .reduce => @panic("TODO try self.airReduce(inst)"), .aggregate_init => try self.airAggregateInit(inst), .union_init => try self.airUnionInit(inst), @@ -2270,8 +2275,14 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { } fn airShlSat(self: *Self, 
inst: Air.Inst.Index) !void { + const zcu = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) + .dead + else if (self.typeOf(bin_op.lhs).isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu)) + return self.fail("TODO implement vector shl_sat with scalar rhs for {}", .{self.target.cpu.arch}) + else + return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2287,7 +2298,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.typeOf(extra.rhs); switch (lhs_ty.zigTypeTag(zcu)) { - .vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), + .vector => if (!rhs_ty.isVector(zcu)) + return self.fail("TODO implement vector shl_with_overflow with scalar rhs", .{}) + else + return self.fail("TODO implement shl_with_overflow for vectors", .{}), .int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { @@ -3002,7 +3016,10 @@ fn binOp( // Truncate if necessary switch (lhs_ty.zigTypeTag(zcu)) { - .vector => return self.fail("TODO binary operations on vectors", .{}), + .vector => if (!rhs_ty.isVector(zcu)) + return self.fail("TODO vector shift with scalar rhs", .{}) + else + return self.fail("TODO binary operations on vectors", .{}), .int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { @@ -3024,7 +3041,10 @@ fn binOp( .shr_exact, => { switch (lhs_ty.zigTypeTag(zcu)) { - .vector => return self.fail("TODO binary operations on vectors", .{}), + .vector => if (!rhs_ty.isVector(zcu)) + return self.fail("TODO vector shift with scalar rhs", .{}) + else + return self.fail("TODO binary operations on vectors", .{}), .int => { const int_info = lhs_ty.intInfo(zcu); if (int_info.bits <= 64) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a48f7012f5..55a61088d0 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -31,6 +31,15 @@ const libcFloatSuffix = target_util.libcFloatSuffix; const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev; const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; +pub fn legalizeFeatures(_: *const std.Target) *const Air.Legalize.Features { + return comptime &.initMany(&.{ + .expand_intcast_safe, + .expand_add_safe, + .expand_sub_safe, + .expand_mul_safe, + }); +} + /// Reference to the function declaration the code /// section belongs to owner_nav: InternPool.Nav.Index, @@ -1995,7 +2004,8 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { .ret_load => cg.airRetLoad(inst), .splat => cg.airSplat(inst), .select => cg.airSelect(inst), - .shuffle => cg.airShuffle(inst), + .shuffle_one => cg.airShuffleOne(inst), + .shuffle_two => cg.airShuffleTwo(inst), .reduce => cg.airReduce(inst), .aggregate_init => cg.airAggregateInit(inst), .union_init => cg.airUnionInit(inst), @@ -2638,6 +2648,10 @@ fn airBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
const result = switch (op) { .shr, .shl => result: { + if (lhs_ty.isVector(zcu) and !rhs_ty.isVector(zcu)) { + return cg.fail("TODO: implement vector '{s}' with scalar rhs", .{@tagName(op)}); + } + const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse { return cg.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; @@ -3055,8 +3069,12 @@ fn airWrapBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const lhs_ty = cg.typeOf(bin_op.lhs); const rhs_ty = cg.typeOf(bin_op.rhs); - if (lhs_ty.zigTypeTag(zcu) == .vector or rhs_ty.zigTypeTag(zcu) == .vector) { - return cg.fail("TODO: Implement wrapping arithmetic for vectors", .{}); + if (lhs_ty.isVector(zcu)) { + if ((op == .shr or op == .shl) and !rhs_ty.isVector(zcu)) { + return cg.fail("TODO: implement wrapping vector '{s}' with scalar rhs", .{@tagName(op)}); + } else { + return cg.fail("TODO: implement wrapping '{s}' for vectors", .{@tagName(op)}); + } } // For certain operations, such as shifting, the types are different. @@ -5160,66 +5178,105 @@ fn airSelect(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { return cg.fail("TODO: Implement wasm airSelect", .{}); } -fn airShuffle(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { +fn airShuffleOne(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { const pt = cg.pt; const zcu = pt.zcu; - const inst_ty = cg.typeOfIndex(inst); - const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = cg.air.extraData(Air.Shuffle, ty_pl.payload).data; - const a = try cg.resolveInst(extra.a); - const b = try cg.resolveInst(extra.b); - const mask = Value.fromInterned(extra.mask); - const mask_len = extra.mask_len; + const unwrapped = cg.air.unwrapShuffleOne(zcu, inst); + const result_ty = unwrapped.result_ty; + const mask = unwrapped.mask; + const operand = try cg.resolveInst(unwrapped.operand); - const child_ty = inst_ty.childType(zcu); - const elem_size = child_ty.abiSize(zcu); + const elem_ty = result_ty.childType(zcu); + const elem_size = elem_ty.abiSize(zcu); - // TODO: One of them could be by ref; handle in loop - if (isByRef(cg.typeOf(extra.a), zcu, cg.target) or isByRef(inst_ty, zcu, cg.target)) { - const result = try cg.allocStack(inst_ty); + // TODO: this function could have an `i8x16_shuffle` fast path like `airShuffleTwo` if we were + // to lower the comptime-known operands to a non-by-ref vector value. - for (0..mask_len) |index| { - const value = (try mask.elemValue(pt, index)).toSignedInt(zcu); + // TODO: this is incorrect if either operand or the result is *not* by-ref, which is possible. + // I tried to fix it, but I couldn't make much sense of how this backend handles memory. + if (!isByRef(result_ty, zcu, cg.target) or + !isByRef(cg.typeOf(unwrapped.operand), zcu, cg.target)) return cg.fail("TODO: handle mixed by-ref shuffle", .{}); - try cg.emitWValue(result); + const dest_alloc = try cg.allocStack(result_ty); + for (mask, 0..) 
|mask_elem, out_idx| { + try cg.emitWValue(dest_alloc); + const elem_val = switch (mask_elem.unwrap()) { + .elem => |idx| try cg.load(operand, elem_ty, @intCast(elem_size * idx)), + .value => |val| try cg.lowerConstant(.fromInterned(val), elem_ty), + }; + try cg.store(.stack, elem_val, elem_ty, @intCast(dest_alloc.offset() + elem_size * out_idx)); + } + return cg.finishAir(inst, dest_alloc, &.{unwrapped.operand}); +} - const loaded = if (value >= 0) - try cg.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value))) - else - try cg.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value))); +fn airShuffleTwo(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const pt = cg.pt; + const zcu = pt.zcu; - try cg.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index))); - } + const unwrapped = cg.air.unwrapShuffleTwo(zcu, inst); + const result_ty = unwrapped.result_ty; + const mask = unwrapped.mask; + const operand_a = try cg.resolveInst(unwrapped.operand_a); + const operand_b = try cg.resolveInst(unwrapped.operand_b); - return cg.finishAir(inst, result, &.{ extra.a, extra.b }); - } else { - var operands = [_]u32{ - @intFromEnum(std.wasm.SimdOpcode.i8x16_shuffle), - } ++ [1]u32{undefined} ** 4; + const a_ty = cg.typeOf(unwrapped.operand_a); + const b_ty = cg.typeOf(unwrapped.operand_b); + const elem_ty = result_ty.childType(zcu); + const elem_size = elem_ty.abiSize(zcu); - var lanes = mem.asBytes(operands[1..]); - for (0..@as(usize, @intCast(mask_len))) |index| { - const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu); - const base_index = if (mask_elem >= 0) - @as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem)) - else - 16 + @as(u8, @intCast(@as(i64, @intCast(elem_size)) * ~mask_elem)); - - for (0..@as(usize, @intCast(elem_size))) |byte_offset| { - lanes[index * @as(usize, @intCast(elem_size)) + byte_offset] = base_index + @as(u8, @intCast(byte_offset)); + // WASM has `i8x16_shuffle`, which we can apply if the element type bit size is a multiple of 8 + // and the input and output vectors have a bit size of 128 (and are hence not by-ref). Otherwise, + // we fall back to a naive loop lowering. + if (!isByRef(a_ty, zcu, cg.target) and + !isByRef(b_ty, zcu, cg.target) and + !isByRef(result_ty, zcu, cg.target) and + elem_ty.bitSize(zcu) % 8 == 0) + { + var lane_map: [16]u8 align(4) = undefined; + const lanes_per_elem: usize = @intCast(elem_ty.bitSize(zcu) / 8); + for (mask, 0..) |mask_elem, out_idx| { + const out_first_lane = out_idx * lanes_per_elem; + const in_first_lane = switch (mask_elem.unwrap()) { + .a_elem => |i| i * lanes_per_elem, + .b_elem => |i| i * lanes_per_elem + 16, + .undef => 0, // doesn't matter + }; + for (lane_map[out_first_lane..][0..lanes_per_elem], in_first_lane..) 
|*out, in| { + out.* = @intCast(in); } } - - try cg.emitWValue(a); - try cg.emitWValue(b); - + try cg.emitWValue(operand_a); + try cg.emitWValue(operand_b); const extra_index = cg.extraLen(); - try cg.mir_extra.appendSlice(cg.gpa, &operands); + try cg.mir_extra.appendSlice(cg.gpa, &.{ + @intFromEnum(std.wasm.SimdOpcode.i8x16_shuffle), + @bitCast(lane_map[0..4].*), + @bitCast(lane_map[4..8].*), + @bitCast(lane_map[8..12].*), + @bitCast(lane_map[12..].*), + }); try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); - - return cg.finishAir(inst, .stack, &.{ extra.a, extra.b }); + return cg.finishAir(inst, .stack, &.{ unwrapped.operand_a, unwrapped.operand_b }); } + + // TODO: this is incorrect if either operand or the result is *not* by-ref, which is possible. + // I tried to fix it, but I couldn't make much sense of how this backend handles memory. + if (!isByRef(result_ty, zcu, cg.target) or + !isByRef(a_ty, zcu, cg.target) or + !isByRef(b_ty, zcu, cg.target)) return cg.fail("TODO: handle mixed by-ref shuffle", .{}); + + const dest_alloc = try cg.allocStack(result_ty); + for (mask, 0..) |mask_elem, out_idx| { + try cg.emitWValue(dest_alloc); + const elem_val = switch (mask_elem.unwrap()) { + .a_elem => |idx| try cg.load(operand_a, elem_ty, @intCast(elem_size * idx)), + .b_elem => |idx| try cg.load(operand_b, elem_ty, @intCast(elem_size * idx)), + .undef => try cg.emitUndefined(elem_ty), + }; + try cg.store(.stack, elem_val, elem_ty, @intCast(dest_alloc.offset() + elem_size * out_idx)); + } + return cg.finishAir(inst, dest_alloc, &.{ unwrapped.operand_a, unwrapped.operand_b }); } fn airReduce(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { @@ -6067,13 +6124,17 @@ fn airShlWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = cg.typeOf(extra.lhs); const rhs_ty = cg.typeOf(extra.rhs); - if (ty.zigTypeTag(zcu) == .vector) { - return cg.fail("TODO: Implement overflow arithmetic for vectors", .{}); + if (ty.isVector(zcu)) { + if (!rhs_ty.isVector(zcu)) { + return cg.fail("TODO: implement vector 'shl_with_overflow' with scalar rhs", .{}); + } else { + return cg.fail("TODO: implement vector 'shl_with_overflow'", .{}); + } } const int_info = ty.intInfo(zcu); const wasm_bits = toWasmBits(int_info.bits) orelse { - return cg.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits}); + return cg.fail("TODO: implement 'shl_with_overflow' for integer bitsize: {d}", .{int_info.bits}); }; // Ensure rhs is coerced to lhs as they must have the same WebAssembly types @@ -6994,6 +7055,11 @@ fn airShlSat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { const pt = cg.pt; const zcu = pt.zcu; + + if (cg.typeOf(bin_op.lhs).isVector(zcu) and !cg.typeOf(bin_op.rhs).isVector(zcu)) { + return cg.fail("TODO: implement vector 'shl_sat' with scalar rhs", .{}); + } + const ty = cg.typeOfIndex(inst); const int_info = ty.intInfo(zcu); const is_signed = int_info.signedness == .signed; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 0b2a17d192..392f9089a7 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -32,10 +32,79 @@ const FrameIndex = bits.FrameIndex; const InnerError = codegen.CodeGenError || error{OutOfRegisters}; -pub const legalize_features: Air.Legalize.Features = .{ - .remove_shift_vector_rhs_splat = false, - .reduce_one_elem_to_bitcast = true, -}; +pub fn legalizeFeatures(target: *const std.Target) *const Air.Legalize.Features { + @setEvalBranchQuota(1_200); + return switch 
(target.ofmt == .coff) { + inline false, true => |use_old| comptime &.init(.{ + .scalarize_add = use_old, + .scalarize_add_sat = use_old, + .scalarize_sub = use_old, + .scalarize_sub_sat = use_old, + .scalarize_mul = use_old, + .scalarize_mul_wrap = use_old, + .scalarize_mul_sat = true, + .scalarize_div_float = use_old, + .scalarize_div_float_optimized = use_old, + .scalarize_div_trunc = use_old, + .scalarize_div_trunc_optimized = use_old, + .scalarize_div_floor = use_old, + .scalarize_div_floor_optimized = use_old, + .scalarize_div_exact = use_old, + .scalarize_div_exact_optimized = use_old, + .scalarize_max = use_old, + .scalarize_min = use_old, + .scalarize_add_with_overflow = true, + .scalarize_sub_with_overflow = true, + .scalarize_mul_with_overflow = true, + .scalarize_shl_with_overflow = true, + .scalarize_bit_and = use_old, + .scalarize_bit_or = use_old, + .scalarize_shr = true, + .scalarize_shr_exact = true, + .scalarize_shl = true, + .scalarize_shl_exact = true, + .scalarize_shl_sat = true, + .scalarize_xor = use_old, + .scalarize_not = use_old, + .scalarize_clz = use_old, + .scalarize_ctz = true, + .scalarize_popcount = true, + .scalarize_byte_swap = true, + .scalarize_bit_reverse = true, + .scalarize_sin = use_old, + .scalarize_cos = use_old, + .scalarize_tan = use_old, + .scalarize_exp = use_old, + .scalarize_exp2 = use_old, + .scalarize_log = use_old, + .scalarize_log2 = use_old, + .scalarize_log10 = use_old, + .scalarize_abs = use_old, + .scalarize_floor = use_old, + .scalarize_ceil = use_old, + .scalarize_trunc_float = use_old, + .scalarize_cmp_vector = true, + .scalarize_cmp_vector_optimized = true, + .scalarize_fptrunc = use_old, + .scalarize_fpext = use_old, + .scalarize_intcast = use_old, + .scalarize_int_from_float = use_old, + .scalarize_int_from_float_optimized = use_old, + .scalarize_float_from_int = use_old, + .scalarize_shuffle_one = true, + .scalarize_shuffle_two = true, + .scalarize_select = true, + .scalarize_mul_add = use_old, + + .unsplat_shift_rhs = false, + .reduce_one_elem_to_bitcast = true, + .expand_intcast_safe = true, + .expand_add_safe = true, + .expand_sub_safe = true, + .expand_mul_safe = true, + }), + }; +} /// Set this to `false` to uncover Sema OPV bugs. /// https://github.com/ziglang/zig/issues/22419 @@ -218,7 +287,7 @@ pub const MCValue = union(enum) { /// Payload is a frame address. lea_frame: bits.FrameAddr, /// Supports integer_per_element abi - elementwise_regs_then_frame: packed struct { regs: u3, frame_off: i29, frame_index: FrameIndex }, + elementwise_args: packed struct { regs: u3, frame_off: i29, frame_index: FrameIndex }, /// This indicates that we have already allocated a frame index for this instruction, /// but it has not been spilled there yet in the current control flow. /// Payload is a frame index. 
@@ -240,7 +309,7 @@ pub const MCValue = union(enum) { .lea_direct, .lea_got, .lea_frame, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => false, @@ -355,7 +424,7 @@ pub const MCValue = union(enum) { .lea_direct, .lea_got, .lea_frame, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, // not in memory @@ -389,7 +458,7 @@ pub const MCValue = union(enum) { .load_got, .load_frame, .load_symbol, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, // not dereferenceable @@ -409,7 +478,7 @@ pub const MCValue = union(enum) { .unreach, .dead, .undef, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, // not valid @@ -463,7 +532,7 @@ pub const MCValue = union(enum) { .load_got, .lea_got, .lea_frame, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .lea_symbol, => unreachable, @@ -547,7 +616,7 @@ pub const MCValue = union(enum) { .load_got => |pl| try writer.print("[got:{d}]", .{pl}), .lea_got => |pl| try writer.print("got:{d}", .{pl}), .load_frame => |pl| try writer.print("[{} + 0x{x}]", .{ pl.index, pl.off }), - .elementwise_regs_then_frame => |pl| try writer.print("elementwise:{d}:[{} + 0x{x}]", .{ + .elementwise_args => |pl| try writer.print("elementwise:{d}:[{} + 0x{x}]", .{ pl.regs, pl.frame_index, pl.frame_off, }), .lea_frame => |pl| try writer.print("{} + 0x{x}", .{ pl.index, pl.off }), @@ -580,7 +649,7 @@ const InstTracking = struct { .lea_symbol, => result, .dead, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, @@ -689,7 +758,7 @@ const InstTracking = struct { .register_overflow, .register_mask, .indirect, - .elementwise_regs_then_frame, + .elementwise_args, .air_ref, => unreachable, } @@ -2239,11 +2308,17 @@ fn gen(self: *CodeGen) InnerError!void { try self.genBody(self.air.getMainBody()); const epilogue = if (self.epilogue_relocs.items.len > 0) epilogue: { - const epilogue_relocs_last_index = self.epilogue_relocs.items.len - 1; - for (if (self.epilogue_relocs.items[epilogue_relocs_last_index] == self.mir_instructions.len - 1) epilogue_relocs: { - _ = self.mir_instructions.pop(); - break :epilogue_relocs self.epilogue_relocs.items[0..epilogue_relocs_last_index]; - } else self.epilogue_relocs.items) |epilogue_reloc| self.performReloc(epilogue_reloc); + var last_inst: Mir.Inst.Index = @intCast(self.mir_instructions.len - 1); + while (self.epilogue_relocs.getLastOrNull() == last_inst) { + self.epilogue_relocs.items.len -= 1; + self.mir_instructions.set(last_inst, .{ + .tag = .pseudo, + .ops = .pseudo_dead_none, + .data = undefined, + }); + last_inst -= 1; + } + for (self.epilogue_relocs.items) |epilogue_reloc| self.performReloc(epilogue_reloc); if (self.debug_output != .none) try self.asmPseudo(.pseudo_dbg_epilogue_begin_none); const backpatch_stack_dealloc = try self.asmPlaceholder(); @@ -2430,7 +2505,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { switch (air_tags[@intFromEnum(inst)]) { // zig fmt: off .select => try cg.airSelect(inst), - .shuffle => try cg.airShuffle(inst), + .shuffle_one, .shuffle_two => @panic("x86_64 TODO: shuffle_one/shuffle_two"), // zig fmt: on .arg => if (cg.debug_output != .none) { @@ -5714,7 +5789,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { }, .extra_temps = .{ .{ .type = .i64, .kind = .{ .rc = .general_purpose } }, - .{ .type = .i64, .kind = .{ .mut_rc = .{ .ref = 
.src1, .rc = .general_purpose } } }, + .{ .type = .i64, .kind = .{ .rc = .general_purpose } }, .unused, .unused, .unused, @@ -63352,14 +63427,14 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { defer assert(cg.loops.remove(inst)); try cg.genBodyBlock(@ptrCast(cg.air.extra.items[block.end..][0..block.data.body_len])); }, - .repeat => if (use_old) try cg.airRepeat(inst) else { + .repeat => { const repeat = air_datas[@intFromEnum(inst)].repeat; const loop = cg.loops.get(repeat.loop_inst).?; try cg.restoreState(loop.state, &.{}, .{ .emit_instructions = true, .update_tracking = false, .resurrect = false, - .close_scope = true, + .close_scope = false, }); _ = try cg.asmJmpReloc(loop.target); }, @@ -77234,11 +77309,27 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { }, } }, - .int => res[0] = ops[0].cmpInts(cmp_op, &ops[1], cg) catch |err| break :err err, + .int => { + switch (ty.zigTypeTag(zcu)) { + else => {}, + .@"struct", .@"union" => { + assert(ty.containerLayout(zcu) == .@"packed"); + for (&ops) |*op| op.wrapInt(cg) catch |err| switch (err) { + error.SelectFailed => return cg.fail("failed to select {s} wrap {} {}", .{ + @tagName(air_tag), + ty.fmt(pt), + op.tracking(cg), + }), + else => |e| return e, + }; + }, + } + res[0] = ops[0].cmpInts(cmp_op, &ops[1], cg) catch |err| break :err err; + }, }) catch |err| switch (err) { error.SelectFailed => return cg.fail("failed to select {s} {} {} {}", .{ @tagName(air_tag), - cg.typeOf(bin_op.lhs).fmt(pt), + ty.fmt(pt), ops[0].tracking(cg), ops[1].tracking(cg), }), @@ -92468,7 +92559,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ }, .{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ }, .{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ }, - .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ }, + .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ }, .{ ._, ._r, .sa, .tmp0q, .ui(63), ._, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ }, @@ -92500,7 +92591,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ }, .{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ }, .{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ }, - .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ }, + .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ }, .{ ._, ._l, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ }, .{ ._, ._r, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ }, @@ -92534,7 +92625,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ }, .{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ }, .{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ }, - .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -8), ._, ._ }, + .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -8), ._, ._ }, .{ ._, ._l, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ }, .{ ._, ._r, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ }, @@ -92595,7 +92686,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ }, 
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ }, .{ ._, ._, .mov, .tmp2d, .sa(.dst0, .add_bit_size_rem_64), ._, ._ }, - .{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_size, -16), .tmp2q, ._ }, + .{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_dst0_size, -16), .tmp2q, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp2q, ._, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ }, } }, @@ -92627,7 +92718,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ }, .{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ }, .{ ._, ._, .mov, .tmp2d, .sa(.dst0, .add_bit_size_rem_64), ._, ._ }, - .{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_size, -8), .tmp2q, ._ }, + .{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_dst0_size, -8), .tmp2q, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp2q, ._, ._ }, } }, }, .{ @@ -92658,7 +92749,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ }, .{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ }, .{ ._, ._, .mov, .tmp0q, .ua(.dst0, .add_umax), ._, ._ }, - .{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ }, + .{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ }, } }, @@ -92690,7 +92781,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ }, .{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ }, .{ ._, ._, .mov, .tmp0q, .ua(.dst0, .add_umax), ._, ._ }, - .{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_size, -8), ._, ._ }, + .{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_dst0_size, -8), ._, ._ }, .{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ }, } }, }, .{ @@ -162356,6 +162447,136 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .each = .{ .once = &.{ .{ ._, ._, .mov, .leasi(.src0w, .@"2", .src1), .src2w, ._, ._ }, } }, + }, .{ + .required_features = .{ .avx, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .word } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, .vp_w, .extr, .leaa(.src0w, .add_src0_elem_size_mul_src1), .src2x, .ui(0), ._ }, + } }, + }, .{ + .required_features = .{ .sse4_1, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .word } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, .p_w, .extr, .leaa(.src0w, .add_src0_elem_size_mul_src1), .src2x, .ui(0), ._ }, + } }, + }, .{ + .required_features = .{ .sse2, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .word } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .extra_temps = .{ + .{ .type = .f16, .kind = .{ .rc = .general_purpose } }, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + }, + .each = .{ .once = &.{ + .{ ._, .p_w, .extr, .tmp0d, .src2x, .ui(0), ._ }, + .{ ._, ._, .mov, .leaa(.src0w, .add_src0_elem_size_mul_src1), .tmp0w, ._, ._ }, + } }, + }, .{ + .required_features = .{ .sse, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .word } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .extra_temps = .{ + .{ .type = .f32, .kind 
= .mem }, + .{ .type = .f16, .kind = .{ .rc = .general_purpose } }, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + }, + .each = .{ .once = &.{ + .{ ._, ._ss, .mov, .mem(.tmp1d), .src2x, ._, ._ }, + .{ ._, ._, .mov, .tmp1d, .mem(.tmp1d), ._, ._ }, + .{ ._, ._, .mov, .leaa(.src0w, .add_src0_elem_size_mul_src1), .tmp1w, ._, ._ }, + } }, + }, .{ + .required_features = .{ .avx, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .word } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .to_gpr, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, .vp_w, .extr, .leasi(.src0w, .@"2", .src1), .src2x, .ui(0), ._ }, + } }, + }, .{ + .required_features = .{ .sse4_1, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .word } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .to_gpr, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, .p_w, .extr, .leasi(.src0w, .@"2", .src1), .src2x, .ui(0), ._ }, + } }, + }, .{ + .required_features = .{ .sse2, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .word } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .extra_temps = .{ + .{ .type = .f16, .kind = .{ .rc = .general_purpose } }, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + }, + .each = .{ .once = &.{ + .{ ._, .p_w, .extr, .tmp0d, .src2x, .ui(0), ._ }, + .{ ._, ._, .mov, .leasi(.src0w, .@"2", .src1), .tmp0w, ._, ._ }, + } }, + }, .{ + .required_features = .{ .sse, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .word } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .extra_temps = .{ + .{ .type = .f32, .kind = .mem }, + .{ .type = .f16, .kind = .{ .rc = .general_purpose } }, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + .unused, + }, + .each = .{ .once = &.{ + .{ ._, ._ss, .mov, .mem(.tmp1d), .src2x, ._, ._ }, + .{ ._, ._, .mov, .tmp1d, .mem(.tmp1d), ._, ._ }, + .{ ._, ._, .mov, .leasi(.src0w, .@"2", .src1), .tmp1w, ._, ._ }, + } }, }, .{ .src_constraints = .{ .any, .any, .{ .int = .dword } }, .patterns = &.{ @@ -162375,29 +162596,119 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { .{ ._, ._, .mov, .leasi(.src0d, .@"4", .src1), .src2d, ._, ._ }, } }, }, .{ - .required_features = .{ .@"64bit", null, null, null }, - .dst_constraints = .{ .{ .int = .qword }, .any }, + .required_features = .{ .avx, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .dword } }, .patterns = &.{ - .{ .src = .{ .to_mem, .simm32, .simm32 } }, - .{ .src = .{ .to_mem, .simm32, .to_gpr } }, + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, .v_ss, .mov, .leaa(.src0d, .add_src0_elem_size_mul_src1), .src2x, ._, ._ }, + } }, + }, .{ + .required_features = .{ .sse, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .dword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, ._ss, .mov, .leaa(.src0d, .add_src0_elem_size_mul_src1), .src2x, ._, ._ }, + } }, + }, .{ + .required_features = .{ .avx, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .dword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .to_gpr, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, .v_ss, .mov, .leasi(.src0d, .@"4", .src1), .src2x, ._, ._ }, + } }, + }, .{ + .required_features = .{ .sse, null, null, null }, + .src_constraints = 
.{ .any, .any, .{ .float = .dword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .to_gpr, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, ._ss, .mov, .leasi(.src0d, .@"4", .src1), .src2x, ._, ._ }, + } }, + }, .{ + .required_features = .{ .@"64bit", null, null, null }, + .src_constraints = .{ .any, .any, .{ .int = .qword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .simm32 } }, + .{ .src = .{ .to_gpr, .simm32, .to_gpr } }, }, .each = .{ .once = &.{ .{ ._, ._, .mov, .leaa(.src0q, .add_src0_elem_size_mul_src1), .src2q, ._, ._ }, } }, }, .{ .required_features = .{ .@"64bit", null, null, null }, - .dst_constraints = .{ .{ .int = .qword }, .any }, + .src_constraints = .{ .any, .any, .{ .int = .qword } }, .patterns = &.{ - .{ .src = .{ .to_mem, .to_gpr, .simm32 } }, - .{ .src = .{ .to_mem, .to_gpr, .to_gpr } }, + .{ .src = .{ .to_gpr, .to_gpr, .simm32 } }, + .{ .src = .{ .to_gpr, .to_gpr, .to_gpr } }, }, .each = .{ .once = &.{ .{ ._, ._, .mov, .leasi(.src0q, .@"8", .src1), .src2q, ._, ._ }, } }, + }, .{ + .required_features = .{ .avx, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .qword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, .v_sd, .mov, .leaa(.src0q, .add_src0_elem_size_mul_src1), .src2x, ._, ._ }, + } }, + }, .{ + .required_features = .{ .sse2, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .qword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, ._sd, .mov, .leaa(.src0q, .add_src0_elem_size_mul_src1), .src2x, ._, ._ }, + } }, + }, .{ + .required_features = .{ .sse, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .qword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .simm32, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, ._ps, .movl, .leaa(.src0q, .add_src0_elem_size_mul_src1), .src2x, ._, ._ }, + } }, + }, .{ + .required_features = .{ .avx, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .qword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .to_gpr, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, .v_sd, .mov, .leasi(.src0q, .@"8", .src1), .src2x, ._, ._ }, + } }, + }, .{ + .required_features = .{ .sse2, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .qword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .to_gpr, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, ._sd, .mov, .leasi(.src0q, .@"8", .src1), .src2x, ._, ._ }, + } }, + }, .{ + .required_features = .{ .sse, null, null, null }, + .src_constraints = .{ .any, .any, .{ .float = .qword } }, + .patterns = &.{ + .{ .src = .{ .to_gpr, .to_gpr, .to_sse } }, + }, + .each = .{ .once = &.{ + .{ ._, ._ps, .movl, .leasi(.src0q, .@"8", .src1), .src2x, ._, ._ }, + } }, } }) catch |err| switch (err) { error.SelectFailed => { const elem_size = cg.typeOf(bin_op.rhs).abiSize(zcu); - while (try ops[0].toBase(false, cg) or + while (try ops[0].toRegClass(true, .general_purpose, cg) or try ops[1].toRegClass(true, .general_purpose, cg)) {} const base_reg = ops[0].tracking(cg).short.register.to64(); @@ -162410,11 +162721,10 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { rhs_reg, .u(elem_size), ); - try cg.asmRegisterMemory( - .{ ._, .lea }, - base_reg, - try ops[0].tracking(cg).short.mem(cg, .{ .index = rhs_reg }), - ); + try cg.asmRegisterMemory(.{ ._, .lea }, base_reg, .{ + .base = .{ .reg = base_reg }, + .mod = .{ .rm = .{ .index = rhs_reg } }, + }); } else if (elem_size > 8) 
{ try cg.spillEflagsIfOccupied(); try cg.asmRegisterImmediate( @@ -162422,20 +162732,18 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { rhs_reg, .u(std.math.log2_int(u64, elem_size)), ); - try cg.asmRegisterMemory( - .{ ._, .lea }, - base_reg, - try ops[0].tracking(cg).short.mem(cg, .{ .index = rhs_reg }), - ); - } else try cg.asmRegisterMemory( - .{ ._, .lea }, - base_reg, - try ops[0].tracking(cg).short.mem(cg, .{ + try cg.asmRegisterMemory(.{ ._, .lea }, base_reg, .{ + .base = .{ .reg = base_reg }, + .mod = .{ .rm = .{ .index = rhs_reg } }, + }); + } else try cg.asmRegisterMemory(.{ ._, .lea }, base_reg, .{ + .base = .{ .reg = base_reg }, + .mod = .{ .rm = .{ .index = rhs_reg, .scale = .fromFactor(@intCast(elem_size)), - }), - ); - try ops[0].store(&ops[1], .{}, cg); + } }, + }); + try ops[0].store(&ops[2], .{}, cg); }, else => |e| return e, }; @@ -165315,9 +165623,7 @@ fn airShlShrBinOp(self: *CodeGen, inst: Air.Inst.Index) !void { .ty = mask_ty.toIntern(), .storage = .{ .elems = &([1]InternPool.Index{ (try rhs_ty.childType(zcu).maxIntScalar(pt, .u8)).toIntern(), - } ++ [1]InternPool.Index{ - (try pt.intValue(.u8, 0)).toIntern(), - } ** 15) }, + } ++ [1]InternPool.Index{.zero_u8} ** 15) }, } }))); const mask_addr_reg = try self.copyToTmpRegister(.usize, mask_mcv.address()); const mask_addr_lock = self.register_manager.lockRegAssumeUnused(mask_addr_reg); @@ -168138,7 +168444,7 @@ fn load(self: *CodeGen, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerE .register_quadruple, .register_overflow, .register_mask, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, => unreachable, // not a valid pointer .immediate, @@ -168356,7 +168662,7 @@ fn store( .register_quadruple, .register_overflow, .register_mask, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, => unreachable, // not a valid pointer .immediate, @@ -168842,7 +169148,7 @@ fn genUnOpMir(self: *CodeGen, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: .lea_direct, .lea_got, .lea_frame, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, // unmodifiable destination @@ -170513,7 +170819,7 @@ fn genBinOp( .load_got, .lea_got, .lea_frame, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, @@ -171696,7 +172002,7 @@ fn genBinOpMir( .lea_got, .lea_frame, .lea_symbol, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, // unmodifiable destination @@ -171732,7 +172038,7 @@ fn genBinOpMir( .undef, .register_overflow, .register_mask, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, => unreachable, .register, @@ -171892,7 +172198,7 @@ fn genBinOpMir( .undef, .register_overflow, .register_mask, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, @@ -171988,7 +172294,7 @@ fn genBinOpMir( .undef, .register_overflow, .register_mask, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, @@ -172119,7 +172425,7 @@ fn genIntMulComplexOpMir(self: *CodeGen, dst_ty: Type, dst_mcv: MCValue, src_mcv .lea_direct, .lea_got, .lea_frame, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, // unmodifiable destination @@ -172151,7 +172457,7 @@ fn genIntMulComplexOpMir(self: *CodeGen, dst_ty: Type, dst_mcv: MCValue, src_mcv .register_quadruple, .register_overflow, .register_mask, - .elementwise_regs_then_frame, + 
.elementwise_args, .reserved_frame, .air_ref, => unreachable, @@ -172271,7 +172577,7 @@ fn airArg(self: *CodeGen, inst: Air.Inst.Index) !void { try self.genCopy(arg_ty, dst_mcv, src_mcv, .{}); break :result dst_mcv; }, - .elementwise_regs_then_frame => |regs_frame_addr| { + .elementwise_args => |regs_frame_addr| { try self.spillEflagsIfOccupied(); const fn_info = zcu.typeToFunc(self.fn_type).?; @@ -172375,7 +172681,7 @@ fn genLocalDebugInfo( .arg, .dbg_arg_inline, .dbg_var_val => |tag| { switch (mcv) { .none => try self.asmAir(.dbg_local, inst), - .unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable, + .unreach, .dead, .elementwise_args, .reserved_frame, .air_ref => unreachable, .immediate => |imm| try self.asmAirImmediate(.dbg_local, inst, .u(imm)), .lea_frame => |frame_addr| try self.asmAirFrameAddress(.dbg_local, inst, frame_addr), .lea_symbol => |sym_off| try self.asmAirImmediate(.dbg_local, inst, .rel(sym_off)), @@ -172398,7 +172704,7 @@ fn genLocalDebugInfo( }, .dbg_var_ptr => switch (mcv) { else => unreachable, - .unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable, + .unreach, .dead, .elementwise_args, .reserved_frame, .air_ref => unreachable, .lea_frame => |frame_addr| try self.asmAirMemory(.dbg_local, inst, .{ .base = .{ .frame = frame_addr.index }, .mod = .{ .rm = .{ @@ -172567,7 +172873,7 @@ fn genCall(self: *CodeGen, info: union(enum) { try self.genCopy(arg_ty, dst_arg, src_arg, opts); try self.freeValue(src_arg); }, - .elementwise_regs_then_frame => |regs_frame_addr| { + .elementwise_args => |regs_frame_addr| { const index_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp); const index_lock = self.register_manager.lockRegAssumeUnused(index_reg); defer self.register_manager.unlockReg(index_lock); @@ -172676,7 +172982,7 @@ fn genCall(self: *CodeGen, info: union(enum) { .indirect => |reg_off| try self.genSetReg(reg_off.reg, .usize, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, }, .{}), - .elementwise_regs_then_frame => |regs_frame_addr| { + .elementwise_args => |regs_frame_addr| { const src_mem: Memory = if (src_arg.isBase()) try src_arg.mem(self, .{ .size = .dword }) else .{ .base = .{ .reg = try self.copyToTmpRegister( .usize, @@ -173064,7 +173370,7 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v .lea_got, .lea_frame, .lea_symbol, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, @@ -173119,7 +173425,7 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v .lea_direct, .lea_got, .lea_frame, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, @@ -173524,7 +173830,7 @@ fn isNull(self: *CodeGen, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) .lea_direct, .lea_got, .lea_symbol, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, @@ -173868,17 +174174,23 @@ fn lowerBlock(self: *CodeGen, inst: Air.Inst.Index, body: []const Air.Inst.Index var block_data = self.blocks.fetchRemove(inst).?; defer block_data.value.deinit(self.gpa); if (block_data.value.relocs.items.len > 0) { + var last_inst: Mir.Inst.Index = @intCast(self.mir_instructions.len - 1); + while (block_data.value.relocs.getLastOrNull() == last_inst) { + block_data.value.relocs.items.len -= 1; + self.mir_instructions.set(last_inst, .{ + .tag = .pseudo, + .ops = .pseudo_dead_none, + .data = undefined, + }); + last_inst -= 
1; + } + for (block_data.value.relocs.items) |block_reloc| self.performReloc(block_reloc); try self.restoreState(block_data.value.state, liveness.deaths, .{ .emit_instructions = false, .update_tracking = true, .resurrect = true, .close_scope = true, }); - const block_relocs_last_index = block_data.value.relocs.items.len - 1; - for (if (block_data.value.relocs.items[block_relocs_last_index] == self.mir_instructions.len - 1) block_relocs: { - _ = self.mir_instructions.pop(); - break :block_relocs block_data.value.relocs.items[0..block_relocs_last_index]; - } else block_data.value.relocs.items) |block_reloc| self.performReloc(block_reloc); } if (std.debug.runtime_safety) assert(self.inst_tracking.getIndex(inst).? == inst_tracking_i); @@ -174453,18 +174765,6 @@ fn airBr(self: *CodeGen, inst: Air.Inst.Index) !void { try self.freeValue(block_tracking.short); } -fn airRepeat(self: *CodeGen, inst: Air.Inst.Index) !void { - const loop_inst = self.air.instructions.items(.data)[@intFromEnum(inst)].repeat.loop_inst; - const repeat_info = self.loops.get(loop_inst).?; - try self.restoreState(repeat_info.state, &.{}, .{ - .emit_instructions = true, - .update_tracking = false, - .resurrect = false, - .close_scope = true, - }); - _ = try self.asmJmpReloc(repeat_info.target); -} - fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void { @setEvalBranchQuota(1_100); const pt = self.pt; @@ -175587,7 +175887,7 @@ fn genCopy(self: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: C .lea_got, .lea_frame, .lea_symbol, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, // unmodifiable destination @@ -175598,7 +175898,7 @@ fn genCopy(self: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: C .dead, .undef, .register_overflow, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, => unreachable, .immediate, @@ -175776,7 +176076,7 @@ fn genSetReg( .none, .unreach, .dead, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, => unreachable, .undef => if (opts.safety) switch (dst_reg.class()) { @@ -176313,7 +176613,7 @@ fn genSetMem( .none, .unreach, .dead, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, => unreachable, .undef => if (opts.safety) try self.genInlineMemset( @@ -178566,10 +178866,10 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void { const mask_ty = try pt.vectorType(.{ .len = vec_len, .child = mask_elem_ty.toIntern() }); var mask_elems_buf: [32]InternPool.Index = undefined; const mask_elems = mask_elems_buf[0..vec_len]; - for (mask_elems, 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{ - .ty = mask_elem_ty.toIntern(), - .storage = .{ .u64 = @as(u64, 1) << @intCast(bit) }, - } }); + for (mask_elems, 0..) |*elem, bit| elem.* = (try pt.intValue( + mask_elem_ty, + @as(u8, 1) << @truncate(bit), + )).toIntern(); const mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = mask_ty.toIntern(), .storage = .{ .elems = mask_elems }, @@ -179437,16 +179737,13 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void { var lhs_mask_elems: [16]InternPool.Index = undefined; for (lhs_mask_elems[0..max_abi_size], 0..) 
|*lhs_mask_elem, byte_index| { const elem_index = byte_index / elem_abi_size; - lhs_mask_elem.* = try pt.intern(.{ .int = .{ - .ty = .u8_type, - .storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: { - const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000; - if (mask_elem < 0) break :elem 0b1_00_00000; - const mask_elem_index: u31 = @intCast(mask_elem); - const byte_off: u32 = @intCast(byte_index % elem_abi_size); - break :elem @intCast(mask_elem_index * elem_abi_size + byte_off); - } }, - } }); + lhs_mask_elem.* = (try pt.intValue(.u8, if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: { + const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000; + if (mask_elem < 0) break :elem 0b1_00_00000; + const mask_elem_index: u31 = @intCast(mask_elem); + const byte_off: u32 = @intCast(byte_index % elem_abi_size); + break :elem mask_elem_index * elem_abi_size + byte_off; + })).toIntern(); } const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type }); const lhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{ @@ -179471,16 +179768,13 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void { var rhs_mask_elems: [16]InternPool.Index = undefined; for (rhs_mask_elems[0..max_abi_size], 0..) |*rhs_mask_elem, byte_index| { const elem_index = byte_index / elem_abi_size; - rhs_mask_elem.* = try pt.intern(.{ .int = .{ - .ty = .u8_type, - .storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: { - const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000; - if (mask_elem >= 0) break :elem 0b1_00_00000; - const mask_elem_index: u31 = @intCast(~mask_elem); - const byte_off: u32 = @intCast(byte_index % elem_abi_size); - break :elem @intCast(mask_elem_index * elem_abi_size + byte_off); - } }, - } }); + rhs_mask_elem.* = (try pt.intValue(.u8, if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: { + const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000; + if (mask_elem >= 0) break :elem 0b1_00_00000; + const mask_elem_index: u31 = @intCast(~mask_elem); + const byte_off: u32 = @intCast(byte_index % elem_abi_size); + break :elem mask_elem_index * elem_abi_size + byte_off; + })).toIntern(); } const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type }); const rhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{ @@ -180611,7 +180905,7 @@ fn resolveCallingConventionValues( result.stack_byte_count = std.mem.alignForward(u31, result.stack_byte_count, frame_elem_align); - arg_mcv[arg_mcv_i] = .{ .elementwise_regs_then_frame = .{ + arg_mcv[arg_mcv_i] = .{ .elementwise_args = .{ .regs = remaining_param_int_regs, .frame_off = @intCast(result.stack_byte_count), .frame_index = stack_frame_base, @@ -181236,7 +181530,7 @@ const Temp = struct { .load_got, .lea_got, .lea_frame, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => false, @@ -181671,7 +181965,7 @@ const Temp = struct { .register_quadruple, .register_overflow, .register_mask, - .elementwise_regs_then_frame, + .elementwise_args, .reserved_frame, .air_ref, => unreachable, // not a valid pointer @@ -186395,19 +186689,46 @@ const Temp = struct { if (cg.reused_operands.isSet(op_index)) continue; try cg.processDeath(op_ref.toIndexAllowNone() orelse continue); } - if (cg.liveness.isUnused(inst)) try temp.die(cg) else switch (temp.unwrap(cg)) { - .ref, .err_ret_trace => { - const result = try 
cg.allocRegOrMem(inst, true); - try cg.genCopy(cg.typeOfIndex(inst), result, temp.tracking(cg).short, .{}); - tracking_log.debug("{} => {} (birth)", .{ inst, result }); - cg.inst_tracking.putAssumeCapacityNoClobber(inst, .init(result)); - }, - .temp => |temp_index| { - const temp_tracking = temp_index.tracking(cg); - tracking_log.debug("{} => {} (birth)", .{ inst, temp_tracking.short }); - cg.inst_tracking.putAssumeCapacityNoClobber(inst, .init(temp_tracking.short)); - assert(cg.reuseTemp(inst, temp_index.toIndex(), temp_tracking)); - }, + if (cg.liveness.isUnused(inst)) try temp.die(cg) else { + switch (temp.unwrap(cg)) { + .ref, .err_ret_trace => { + const temp_mcv = temp.tracking(cg).short; + const result = result: switch (temp_mcv) { + .none, .unreach, .dead, .elementwise_args, .reserved_frame, .air_ref => unreachable, + .undef, .immediate, .lea_frame => temp_mcv, + .eflags, + .register, + .register_pair, + .register_triple, + .register_quadruple, + .register_offset, + .register_overflow, + .register_mask, + .memory, + .load_symbol, + .lea_symbol, + .indirect, + .load_direct, + .lea_direct, + .load_got, + .lea_got, + .load_frame, + => { + const result = try cg.allocRegOrMem(inst, true); + try cg.genCopy(cg.typeOfIndex(inst), result, temp_mcv, .{}); + break :result result; + }, + }; + tracking_log.debug("{} => {} (birth)", .{ inst, result }); + cg.inst_tracking.putAssumeCapacityNoClobber(inst, .init(result)); + }, + .temp => |temp_index| { + const temp_tracking = temp_index.tracking(cg); + tracking_log.debug("{} => {} (birth)", .{ inst, temp_tracking.short }); + cg.inst_tracking.putAssumeCapacityNoClobber(inst, .init(temp_tracking.short)); + assert(cg.reuseTemp(inst, temp_index.toIndex(), temp_tracking)); + }, + } } for (0.., op_refs, op_temps) |op_index, op_ref, op_temp| { if (op_temp.index != temp.index) continue; @@ -187950,6 +188271,7 @@ const Select = struct { ptr_bit_size, size, src0_size, + dst0_size, delta_size, delta_elem_size, unaligned_size, @@ -187993,6 +188315,7 @@ const Select = struct { const sub_src0_size: Adjust = .{ .sign = .neg, .lhs = .src0_size, .op = .mul, .rhs = .@"1" }; const add_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"1" }; const add_8_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"8" }; + const add_dst0_size: Adjust = .{ .sign = .pos, .lhs = .dst0_size, .op = .mul, .rhs = .@"1" }; const add_delta_size_div_8: Adjust = .{ .sign = .pos, .lhs = .delta_size, .op = .div, .rhs = .@"8" }; const add_delta_elem_size: Adjust = .{ .sign = .pos, .lhs = .delta_elem_size, .op = .mul, .rhs = .@"1" }; const add_delta_elem_size_div_8: Adjust = .{ .sign = .pos, .lhs = .delta_elem_size, .op = .div, .rhs = .@"8" }; @@ -188788,6 +189111,7 @@ const Select = struct { .ptr_bit_size => s.cg.target.ptrBitWidth(), .size => @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu)), .src0_size => @intCast(Select.Operand.Ref.src0.typeOf(s).abiSize(s.cg.pt.zcu)), + .dst0_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).abiSize(s.cg.pt.zcu)), .delta_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu))) - @as(SignedImm, @intCast(op.flags.index.ref.typeOf(s).abiSize(s.cg.pt.zcu)))), .delta_elem_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))) - diff --git a/src/codegen.zig b/src/codegen.zig index f5108162ca..1f794bbeea 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -27,13 +27,27 @@ pub const CodeGenError = 
GenerateSymbolError || error{ CodegenFail, }; -fn devFeatureForBackend(comptime backend: std.builtin.CompilerBackend) dev.Feature { - comptime assert(mem.startsWith(u8, @tagName(backend), "stage2_")); - return @field(dev.Feature, @tagName(backend)["stage2_".len..] ++ "_backend"); +fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature { + return switch (backend) { + .other, .stage1 => unreachable, + .stage2_aarch64 => .aarch64_backend, + .stage2_arm => .arm_backend, + .stage2_c => .c_backend, + .stage2_llvm => .llvm_backend, + .stage2_powerpc => .powerpc_backend, + .stage2_riscv64 => .riscv64_backend, + .stage2_sparc64 => .sparc64_backend, + .stage2_spirv64 => .spirv64_backend, + .stage2_wasm => .wasm_backend, + .stage2_x86 => .x86_backend, + .stage2_x86_64 => .x86_64_backend, + _ => unreachable, + }; } -pub fn importBackend(comptime backend: std.builtin.CompilerBackend) ?type { +fn importBackend(comptime backend: std.builtin.CompilerBackend) type { return switch (backend) { + .other, .stage1 => unreachable, .stage2_aarch64 => @import("arch/aarch64/CodeGen.zig"), .stage2_arm => @import("arch/arm/CodeGen.zig"), .stage2_c => @import("codegen/c.zig"), @@ -42,11 +56,35 @@ pub fn importBackend(comptime backend: std.builtin.CompilerBackend) ?type { .stage2_riscv64 => @import("arch/riscv64/CodeGen.zig"), .stage2_sparc64 => @import("arch/sparc64/CodeGen.zig"), .stage2_spirv64 => @import("codegen/spirv.zig"), - .stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"), - else => null, + .stage2_wasm => @import("arch/wasm/CodeGen.zig"), + .stage2_x86, .stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"), + _ => unreachable, }; } +pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*const Air.Legalize.Features { + const zcu = pt.zcu; + const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result; + switch (target_util.zigBackend(target.*, zcu.comp.config.use_llvm)) { + else => unreachable, + inline .stage2_llvm, + .stage2_c, + .stage2_wasm, + .stage2_arm, + .stage2_x86_64, + .stage2_aarch64, + .stage2_x86, + .stage2_riscv64, + .stage2_sparc64, + .stage2_spirv64, + .stage2_powerpc, + => |backend| { + dev.check(devFeatureForBackend(backend)); + return importBackend(backend).legalizeFeatures(target); + }, + } +} + pub fn generateFunction( lf: *link.File, pt: Zcu.PerThread, @@ -60,7 +98,7 @@ pub fn generateFunction( const zcu = pt.zcu; const func = zcu.funcInfo(func_index); const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result; - switch (target_util.zigBackend(target, false)) { + switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) { else => unreachable, inline .stage2_aarch64, .stage2_arm, @@ -70,7 +108,7 @@ pub fn generateFunction( .stage2_x86_64, => |backend| { dev.check(devFeatureForBackend(backend)); - return importBackend(backend).?.generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output); + return importBackend(backend).generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output); }, } } @@ -88,14 +126,14 @@ pub fn generateLazyFunction( zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result else zcu.getTarget(); - switch (target_util.zigBackend(target, false)) { + switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) { else => unreachable, inline .stage2_powerpc, .stage2_riscv64, .stage2_x86_64, => |backend| { dev.check(devFeatureForBackend(backend)); - return importBackend(backend).?.generateLazy(lf, pt, src_loc, lazy_sym, code, 
debug_output); + return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output); }, } } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 8539efdbfe..c68abc06ce 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -4,6 +4,7 @@ const assert = std.debug.assert; const mem = std.mem; const log = std.log.scoped(.c); +const dev = @import("../dev.zig"); const link = @import("../link.zig"); const Zcu = @import("../Zcu.zig"); const Module = @import("../Package/Module.zig"); @@ -20,6 +21,15 @@ const Alignment = InternPool.Alignment; const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; +pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features { + return if (dev.env.supports(.legalize)) comptime &.initMany(&.{ + .expand_intcast_safe, + .expand_add_safe, + .expand_sub_safe, + .expand_mul_safe, + }) else null; // we don't currently ask zig1 to use safe optimization modes +} + pub const CType = @import("c/Type.zig"); pub const CValue = union(enum) { @@ -206,7 +216,6 @@ const reserved_idents = std.StaticStringMap(void).initComptime(.{ .{ "atomic_ushort", {} }, .{ "atomic_wchar_t", {} }, .{ "auto", {} }, - .{ "bool", {} }, .{ "break", {} }, .{ "case", {} }, .{ "char", {} }, @@ -266,6 +275,11 @@ const reserved_idents = std.StaticStringMap(void).initComptime(.{ .{ "va_end", {} }, .{ "va_copy", {} }, + // stdbool.h + .{ "bool", {} }, + .{ "false", {} }, + .{ "true", {} }, + // stddef.h .{ "offsetof", {} }, @@ -1591,7 +1605,7 @@ pub const DeclGen = struct { try writer.writeAll("(("); try dg.renderCType(writer, ctype); return writer.print("){x})", .{ - try dg.fmtIntLiteral(try pt.undefValue(.usize), .Other), + try dg.fmtIntLiteral(.undef_usize, .Other), }); }, .slice => { @@ -1605,7 +1619,7 @@ pub const DeclGen = struct { const ptr_ty = ty.slicePtrFieldType(zcu); try dg.renderType(writer, ptr_ty); return writer.print("){x}, {0x}}}", .{ - try dg.fmtIntLiteral(try dg.pt.undefValue(.usize), .Other), + try dg.fmtIntLiteral(.undef_usize, .Other), }); }, }, @@ -3360,7 +3374,8 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .error_name => try airErrorName(f, inst), .splat => try airSplat(f, inst), .select => try airSelect(f, inst), - .shuffle => try airShuffle(f, inst), + .shuffle_one => try airShuffleOne(f, inst), + .shuffle_two => try airShuffleTwo(f, inst), .reduce => try airReduce(f, inst), .aggregate_init => try airAggregateInit(f, inst), .union_init => try airUnionInit(f, inst), @@ -4179,7 +4194,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: try v.elem(f, w); try w.writeAll(", "); try f.writeCValue(w, rhs, .FunctionArgument); - try v.elem(f, w); + if (f.typeOf(bin_op.rhs).isVector(zcu)) try v.elem(f, w); try f.object.dg.renderBuiltinInfo(w, scalar_ty, info); try w.writeAll(");\n"); try v.end(f, inst, w); @@ -6376,7 +6391,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { if (operand_child_ctype.info(ctype_pool) == .array) { try writer.writeByte('&'); try f.writeCValueDeref(writer, operand); - try writer.print("[{}]", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))}); + try writer.print("[{}]", .{try f.fmtIntLiteral(.zero_usize)}); } else try f.writeCValue(writer, operand, .Other); } try a.end(f, writer); @@ -6536,7 +6551,7 @@ fn airBinBuiltinCall( try v.elem(f, writer); try writer.writeAll(", "); try f.writeCValue(writer, rhs, .FunctionArgument); - try v.elem(f, writer); + if (f.typeOf(bin_op.rhs).isVector(zcu)) try v.elem(f, writer); try 
f.object.dg.renderBuiltinInfo(writer, scalar_ty, info); try writer.writeAll(");\n"); try v.end(f, inst, writer); @@ -6907,7 +6922,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("for ("); try f.writeCValue(writer, index, .Other); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, try pt.intValue(.usize, 0), .Other); + try f.object.dg.renderValue(writer, .zero_usize, .Other); try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); @@ -7149,34 +7164,73 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { return local; } -fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { +fn airShuffleOne(f: *Function, inst: Air.Inst.Index) !CValue { const pt = f.object.dg.pt; const zcu = pt.zcu; - const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; - const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; - const mask = Value.fromInterned(extra.mask); - const lhs = try f.resolveInst(extra.a); - const rhs = try f.resolveInst(extra.b); - - const inst_ty = f.typeOfIndex(inst); + const unwrapped = f.air.unwrapShuffleOne(zcu, inst); + const mask = unwrapped.mask; + const operand = try f.resolveInst(unwrapped.operand); + const inst_ty = unwrapped.result_ty; const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands - for (0..extra.mask_len) |index| { + try reap(f, inst, &.{unwrapped.operand}); // local cannot alias operand + for (mask, 0..) |mask_elem, out_idx| { try f.writeCValue(writer, local, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, try pt.intValue(.usize, index), .Other); + try f.object.dg.renderValue(writer, try pt.intValue(.usize, out_idx), .Other); try writer.writeAll("] = "); + switch (mask_elem.unwrap()) { + .elem => |src_idx| { + try f.writeCValue(writer, operand, .Other); + try writer.writeByte('['); + try f.object.dg.renderValue(writer, try pt.intValue(.usize, src_idx), .Other); + try writer.writeByte(']'); + }, + .value => |val| try f.object.dg.renderValue(writer, .fromInterned(val), .Other), + } + try writer.writeAll(";\n"); + } - const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu); - const src_val = try pt.intValue(.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63))); + return local; +} - try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); +fn airShuffleTwo(f: *Function, inst: Air.Inst.Index) !CValue { + const pt = f.object.dg.pt; + const zcu = pt.zcu; + + const unwrapped = f.air.unwrapShuffleTwo(zcu, inst); + const mask = unwrapped.mask; + const operand_a = try f.resolveInst(unwrapped.operand_a); + const operand_b = try f.resolveInst(unwrapped.operand_b); + const inst_ty = unwrapped.result_ty; + const elem_ty = inst_ty.childType(zcu); + + const writer = f.object.writer(); + const local = try f.allocLocal(inst, inst_ty); + try reap(f, inst, &.{ unwrapped.operand_a, unwrapped.operand_b }); // local cannot alias operands + for (mask, 0..) 
|mask_elem, out_idx| { + try f.writeCValue(writer, local, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, src_val, .Other); - try writer.writeAll("];\n"); + try f.object.dg.renderValue(writer, try pt.intValue(.usize, out_idx), .Other); + try writer.writeAll("] = "); + switch (mask_elem.unwrap()) { + .a_elem => |src_idx| { + try f.writeCValue(writer, operand_a, .Other); + try writer.writeByte('['); + try f.object.dg.renderValue(writer, try pt.intValue(.usize, src_idx), .Other); + try writer.writeByte(']'); + }, + .b_elem => |src_idx| { + try f.writeCValue(writer, operand_b, .Other); + try writer.writeByte('['); + try f.object.dg.renderValue(writer, try pt.intValue(.usize, src_idx), .Other); + try writer.writeByte(']'); + }, + .undef => try f.object.dg.renderUndefValue(writer, elem_ty, .Other), + } + try writer.writeAll(";\n"); } return local; @@ -8311,11 +8365,11 @@ const Vectorize = struct { try writer.writeAll("for ("); try f.writeCValue(writer, local, .Other); - try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))}); + try writer.print(" = {d}; ", .{try f.fmtIntLiteral(.zero_usize)}); try f.writeCValue(writer, local, .Other); try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try pt.intValue(.usize, ty.vectorLen(zcu)))}); try f.writeCValue(writer, local, .Other); - try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try pt.intValue(.usize, 1))}); + try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(.one_usize)}); f.object.indent_writer.pushIndent(); break :index .{ .index = local }; diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index 7d3a485e2a..e5901ec626 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -1408,6 +1408,15 @@ pub const Pool = struct { .bits = pt.zcu.errorSetBits(), }, mod, kind), + .ptr_usize_type, + => return pool.getPointer(allocator, .{ + .elem_ctype = .usize, + }), + .ptr_const_comptime_int_type, + => return pool.getPointer(allocator, .{ + .elem_ctype = .void, + .@"const" = true, + }), .manyptr_u8_type, => return pool.getPointer(allocator, .{ .elem_ctype = .u8, @@ -1418,11 +1427,6 @@ pub const Pool = struct { .elem_ctype = .u8, .@"const" = true, }), - .single_const_pointer_to_comptime_int_type, - => return pool.getPointer(allocator, .{ - .elem_ctype = .void, - .@"const" = true, - }), .slice_const_u8_type, .slice_const_u8_sentinel_0_type, => { @@ -2157,11 +2161,16 @@ pub const Pool = struct { }, .undef, + .undef_bool, + .undef_usize, + .undef_u1, .zero, .zero_usize, + .zero_u1, .zero_u8, .one, .one_usize, + .one_u1, .one_u8, .four_u8, .negative_one, @@ -2172,7 +2181,7 @@ pub const Pool = struct { .bool_false, .empty_tuple, .none, - => unreachable, + => unreachable, // values, not types _ => |ip_index| switch (ip.indexToKey(ip_index)) { .int_type => |int_info| return pool.fromIntInfo(allocator, int_info, mod, kind), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d2a72502ed..268a57417b 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -36,6 +36,10 @@ const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; const Error = error{ OutOfMemory, CodegenFail }; +pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features { + return null; +} + fn subArchName(features: std.Target.Cpu.Feature.Set, arch: anytype, mappings: anytype) ?[]const u8 { inline for (mappings) |mapping| { if (arch.featureSetHas(features, mapping[0])) return mapping[1]; @@ -3081,10 +3085,11 @@ pub const Object = struct { .undefined_type, .enum_literal_type, => 
unreachable,
+            .ptr_usize_type,
+            .ptr_const_comptime_int_type,
             .manyptr_u8_type,
             .manyptr_const_u8_type,
             .manyptr_const_u8_sentinel_0_type,
-            .single_const_pointer_to_comptime_int_type,
             => .ptr,
             .slice_const_u8_type,
             .slice_const_u8_sentinel_0_type,
@@ -3098,11 +3103,16 @@ pub const Object = struct {
             => unreachable, // values, not types
             .undef,
+            .undef_bool,
+            .undef_usize,
+            .undef_u1,
             .zero,
             .zero_usize,
+            .zero_u1,
             .zero_u8,
             .one,
             .one_usize,
+            .one_u1,
             .one_u8,
             .four_u8,
             .negative_one,
@@ -4959,7 +4969,8 @@ pub const FuncGen = struct {
             .error_name => try self.airErrorName(inst),
             .splat => try self.airSplat(inst),
             .select => try self.airSelect(inst),
-            .shuffle => try self.airShuffle(inst),
+            .shuffle_one => try self.airShuffleOne(inst),
+            .shuffle_two => try self.airShuffleTwo(inst),
             .aggregate_init => try self.airAggregateInit(inst),
             .union_init => try self.airUnionInit(inst),
             .prefetch => try self.airPrefetch(inst),
@@ -8917,6 +8928,8 @@ pub const FuncGen = struct {
         const rhs = try self.resolveInst(extra.rhs);
 
         const lhs_ty = self.typeOf(extra.lhs);
+        if (lhs_ty.isVector(zcu) and !self.typeOf(extra.rhs).isVector(zcu))
+            return self.ng.todo("implement vector shifts with scalar rhs", .{});
         const lhs_scalar_ty = lhs_ty.scalarType(zcu);
 
         const dest_ty = self.typeOfIndex(inst);
@@ -8986,6 +8999,8 @@ pub const FuncGen = struct {
         const rhs = try self.resolveInst(bin_op.rhs);
 
         const lhs_ty = self.typeOf(bin_op.lhs);
+        if (lhs_ty.isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu))
+            return self.ng.todo("implement vector shifts with scalar rhs", .{});
         const lhs_scalar_ty = lhs_ty.scalarType(zcu);
 
         const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
@@ -8997,14 +9012,17 @@ fn airShl(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
         const o = self.ng.object;
+        const zcu = o.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_type = self.typeOf(bin_op.lhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        if (lhs_ty.isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu))
+            return self.ng.todo("implement vector shifts with scalar rhs", .{});
 
-        const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_type), "");
+        const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
         return self.wip.bin(.shl, lhs, casted_rhs, "");
     }
@@ -9023,6 +9041,8 @@
         const llvm_lhs_scalar_ty = llvm_lhs_ty.scalarType(&o.builder);
 
         const rhs_ty = self.typeOf(bin_op.rhs);
+        if (lhs_ty.isVector(zcu) and !rhs_ty.isVector(zcu))
+            return self.ng.todo("implement vector shifts with scalar rhs", .{});
         const rhs_info = rhs_ty.intInfo(zcu);
         assert(rhs_info.signedness == .unsigned);
         const llvm_rhs_ty = try o.lowerType(rhs_ty);
@@ -9095,6 +9115,8 @@
         const rhs = try self.resolveInst(bin_op.rhs);
 
         const lhs_ty = self.typeOf(bin_op.lhs);
+        if (lhs_ty.isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu))
+            return self.ng.todo("implement vector shifts with scalar rhs", .{});
         const lhs_scalar_ty = lhs_ty.scalarType(zcu);
 
         const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
@@ -9167,11 +9189,7 @@
         const is_vector = operand_ty.zigTypeTag(zcu) == .vector;
         assert(is_vector == (dest_ty.zigTypeTag(zcu) == .vector));
 
-        const min_panic_id: Zcu.SimplePanicId, const max_panic_id: Zcu.SimplePanicId = id: {
-            if (dest_is_enum) break :id .{ .invalid_enum_value, .invalid_enum_value };
-            if (dest_info.signedness == .unsigned) break :id .{ .negative_to_unsigned, .cast_truncated_data };
-            break :id .{ .cast_truncated_data, .cast_truncated_data };
-        };
+        const panic_id: Zcu.SimplePanicId = if (dest_is_enum) .invalid_enum_value else .integer_out_of_bounds;
 
         if (have_min_check) {
             const min_const_scalar = try minIntConst(&o.builder, dest_scalar, operand_scalar_llvm_ty, zcu);
@@ -9185,7 +9203,7 @@
             const ok_block = try fg.wip.block(1, "IntMinOk");
             _ = try fg.wip.brCond(ok, ok_block, fail_block, .none);
             fg.wip.cursor = .{ .block = fail_block };
-            try fg.buildSimplePanic(min_panic_id);
+            try fg.buildSimplePanic(panic_id);
             fg.wip.cursor = .{ .block = ok_block };
         }
 
@@ -9201,7 +9219,7 @@
             const ok_block = try fg.wip.block(1, "IntMaxOk");
             _ = try fg.wip.brCond(ok, ok_block, fail_block, .none);
             fg.wip.cursor = .{ .block = fail_block };
-            try fg.buildSimplePanic(max_panic_id);
+            try fg.buildSimplePanic(panic_id);
             fg.wip.cursor = .{ .block = ok_block };
         }
     }
@@ -9249,8 +9267,6 @@
         const operand_ty = self.typeOf(ty_op.operand);
         const dest_ty = self.typeOfIndex(inst);
         const target = zcu.getTarget();
-        const dest_bits = dest_ty.floatBits(target);
-        const src_bits = operand_ty.floatBits(target);
 
         if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) {
             return self.wip.cast(.fptrunc, operand, try o.lowerType(dest_ty), "");
@@ -9258,6 +9274,8 @@
             const operand_llvm_ty = try o.lowerType(operand_ty);
             const dest_llvm_ty = try o.lowerType(dest_ty);
 
+            const dest_bits = dest_ty.floatBits(target);
+            const src_bits = operand_ty.floatBits(target);
             const fn_name = try o.builder.strtabStringFmt("__trunc{s}f{s}f2", .{
                 compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits),
             });
@@ -9342,11 +9360,12 @@
             return self.wip.conv(.unsigned, operand, llvm_dest_ty, "");
         }
 
-        if (operand_ty.zigTypeTag(zcu) == .int and inst_ty.isPtrAtRuntime(zcu)) {
+        const operand_scalar_ty = operand_ty.scalarType(zcu);
+        const inst_scalar_ty = inst_ty.scalarType(zcu);
+        if (operand_scalar_ty.zigTypeTag(zcu) == .int and inst_scalar_ty.isPtrAtRuntime(zcu)) {
             return self.wip.cast(.inttoptr, operand, llvm_dest_ty, "");
         }
-
-        if (operand_ty.isPtrAtRuntime(zcu) and inst_ty.zigTypeTag(zcu) == .int) {
+        if (operand_scalar_ty.isPtrAtRuntime(zcu) and inst_scalar_ty.zigTypeTag(zcu) == .int) {
             return self.wip.cast(.ptrtoint, operand, llvm_dest_ty, "");
         }
 
@@ -9644,7 +9663,7 @@
        const zcu = o.pt.zcu;
        const ip = &zcu.intern_pool;
        for (body_tail[1..]) |body_inst| {
-            switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) {
+            switch (fg.liveness.categorizeOperand(fg.air, zcu, body_inst, body_tail[0], ip)) {
                .none => continue,
                .write, .noret, .complex => return false,
                .tomb => return true,
@@ -10399,42 +10418,192 @@
         return self.wip.select(.normal, pred, a, b, "");
     }
 
-    fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
-        const o = self.ng.object;
+    fn airShuffleOne(fg: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
+        const o = fg.ng.object;
         const pt = o.pt;
         const zcu = pt.zcu;
-        const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-        const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
-        const a = try self.resolveInst(extra.a);
-        const b = try self.resolveInst(extra.b);
-        const mask = Value.fromInterned(extra.mask);
-        const mask_len = extra.mask_len;
-        const a_len = self.typeOf(extra.a).vectorLen(zcu);
+        const gpa = zcu.gpa;
 
-        // LLVM uses integers larger than the length of the first array to
-        // index into the second array. This was deemed unnecessarily fragile
-        // when changing code, so Zig uses negative numbers to index the
-        // second vector. These start at -1 and go down, and are easiest to use
-        // with the ~ operator. Here we convert between the two formats.
-        const values = try self.gpa.alloc(Builder.Constant, mask_len);
-        defer self.gpa.free(values);
+        const unwrapped = fg.air.unwrapShuffleOne(zcu, inst);
 
-        for (values, 0..) |*val, i| {
-            const elem = try mask.elemValue(pt, i);
-            if (elem.isUndef(zcu)) {
-                val.* = try o.builder.undefConst(.i32);
-            } else {
-                const int = elem.toSignedInt(zcu);
-                const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len);
-                val.* = try o.builder.intConst(.i32, unsigned);
+        const operand = try fg.resolveInst(unwrapped.operand);
+        const mask = unwrapped.mask;
+        const operand_ty = fg.typeOf(unwrapped.operand);
+        const llvm_operand_ty = try o.lowerType(operand_ty);
+        const llvm_result_ty = try o.lowerType(unwrapped.result_ty);
+        const llvm_elem_ty = try o.lowerType(unwrapped.result_ty.childType(zcu));
+        const llvm_poison_elem = try o.builder.poisonConst(llvm_elem_ty);
+        const llvm_poison_mask_elem = try o.builder.poisonConst(.i32);
+        const llvm_mask_ty = try o.builder.vectorType(.normal, @intCast(mask.len), .i32);
+
+        // LLVM requires that the two input vectors have the same length, so lowering isn't trivial.
+        // And, in the words of jacobly0: "llvm sucks at shuffles so we do have to hold its hand at
+        // least a bit". So, there are two cases here.
+        //
+        // If the operand length equals the mask length, we do just the one `shufflevector`, where
+        // the second operand is a constant vector with comptime-known elements at the right indices
+        // and poison values elsewhere (in the indices which won't be selected).
+        //
+        // Otherwise, we lower to *two* `shufflevector` instructions. The first shuffles the runtime
+        // operand with an all-poison vector to extract and correctly position all of the runtime
+        // elements. We also make a constant vector with all of the comptime elements correctly
+        // positioned. Then, our second instruction selects elements from those "runtime-or-poison"
+        // and "comptime-or-poison" vectors to compute the result.
+
+        // This buffer is used primarily for the mask constants.
+        const llvm_elem_buf = try gpa.alloc(Builder.Constant, mask.len);
+        defer gpa.free(llvm_elem_buf);
+
+        // ...but first, we'll collect all of the comptime-known values.
+        var any_defined_comptime_value = false;
+        for (mask, llvm_elem_buf) |mask_elem, *llvm_elem| {
+            llvm_elem.* = switch (mask_elem.unwrap()) {
+                .elem => llvm_poison_elem,
+                .value => |val| if (!Value.fromInterned(val).isUndef(zcu)) elem: {
+                    any_defined_comptime_value = true;
+                    break :elem try o.lowerValue(val);
+                } else llvm_poison_elem,
+            };
+        }
+        // This vector is like the result, but runtime elements are replaced with poison.
+        const comptime_and_poison: Builder.Value = if (any_defined_comptime_value) vec: {
+            break :vec try o.builder.vectorValue(llvm_result_ty, llvm_elem_buf);
+        } else try o.builder.poisonValue(llvm_result_ty);
+
+        if (operand_ty.vectorLen(zcu) == mask.len) {
+            // input length equals mask/output length, so we lower to one instruction
+            for (mask, llvm_elem_buf, 0..) |mask_elem, *llvm_elem, elem_idx| {
+                llvm_elem.* = switch (mask_elem.unwrap()) {
+                    .elem => |idx| try o.builder.intConst(.i32, idx),
+                    .value => |val| if (!Value.fromInterned(val).isUndef(zcu)) mask_val: {
+                        break :mask_val try o.builder.intConst(.i32, mask.len + elem_idx);
+                    } else llvm_poison_mask_elem,
+                };
             }
+            return fg.wip.shuffleVector(
+                operand,
+                comptime_and_poison,
+                try o.builder.vectorValue(llvm_mask_ty, llvm_elem_buf),
+                "",
+            );
         }
 
-        const llvm_mask_value = try o.builder.vectorValue(
-            try o.builder.vectorType(.normal, mask_len, .i32),
-            values,
+        for (mask, llvm_elem_buf) |mask_elem, *llvm_elem| {
+            llvm_elem.* = switch (mask_elem.unwrap()) {
+                .elem => |idx| try o.builder.intConst(.i32, idx),
+                .value => llvm_poison_mask_elem,
+            };
+        }
+        // This vector is like our result, but all comptime-known elements are poison.
+        const runtime_and_poison = try fg.wip.shuffleVector(
+            operand,
+            try o.builder.poisonValue(llvm_operand_ty),
+            try o.builder.vectorValue(llvm_mask_ty, llvm_elem_buf),
+            "",
+        );
+
+        if (!any_defined_comptime_value) {
+            // `comptime_and_poison` is just poison; a second shuffle would be a nop.
+            return runtime_and_poison;
+        }
+
+        // In this second shuffle, the inputs, the mask, and the output all have the same length.
+        for (mask, llvm_elem_buf, 0..) |mask_elem, *llvm_elem, elem_idx| {
+            llvm_elem.* = switch (mask_elem.unwrap()) {
+                .elem => try o.builder.intConst(.i32, elem_idx),
+                .value => |val| if (!Value.fromInterned(val).isUndef(zcu)) mask_val: {
+                    break :mask_val try o.builder.intConst(.i32, mask.len + elem_idx);
+                } else llvm_poison_mask_elem,
+            };
+        }
+        // Merge the runtime and comptime elements with the mask we just built.
+        return fg.wip.shuffleVector(
+            runtime_and_poison,
+            comptime_and_poison,
+            try o.builder.vectorValue(llvm_mask_ty, llvm_elem_buf),
+            "",
+        );
+    }
+
+    fn airShuffleTwo(fg: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
+        const o = fg.ng.object;
+        const pt = o.pt;
+        const zcu = pt.zcu;
+        const gpa = zcu.gpa;
+
+        const unwrapped = fg.air.unwrapShuffleTwo(zcu, inst);
+
+        const mask = unwrapped.mask;
+        const llvm_elem_ty = try o.lowerType(unwrapped.result_ty.childType(zcu));
+        const llvm_mask_ty = try o.builder.vectorType(.normal, @intCast(mask.len), .i32);
+        const llvm_poison_mask_elem = try o.builder.poisonConst(.i32);
+
+        // This is kind of simpler than in `airShuffleOne`. We extend the shorter vector to the
+        // length of the longer one with an initial `shufflevector` if necessary, and then do the
+        // actual computation with a second `shufflevector`.
+
+        const operand_a_len = fg.typeOf(unwrapped.operand_a).vectorLen(zcu);
+        const operand_b_len = fg.typeOf(unwrapped.operand_b).vectorLen(zcu);
+        const operand_len: u32 = @max(operand_a_len, operand_b_len);
+
+        // If we need to extend an operand, this is the type that mask will have.
+        const llvm_operand_mask_ty = try o.builder.vectorType(.normal, operand_len, .i32);
+
+        const llvm_elem_buf = try gpa.alloc(Builder.Constant, @max(mask.len, operand_len));
+        defer gpa.free(llvm_elem_buf);
+
+        const operand_a: Builder.Value = extend: {
+            const raw = try fg.resolveInst(unwrapped.operand_a);
+            if (operand_a_len == operand_len) break :extend raw;
+            // Extend with a `shufflevector`, with a mask `<0, 1, ..., n, poison, poison, ..., poison>`
+            const mask_elems = llvm_elem_buf[0..operand_len];
+            for (mask_elems[0..operand_a_len], 0..) |*llvm_elem, elem_idx| {
+                llvm_elem.* = try o.builder.intConst(.i32, elem_idx);
+            }
+            @memset(mask_elems[operand_a_len..], llvm_poison_mask_elem);
+            const llvm_this_operand_ty = try o.builder.vectorType(.normal, operand_a_len, llvm_elem_ty);
+            break :extend try fg.wip.shuffleVector(
+                raw,
+                try o.builder.poisonValue(llvm_this_operand_ty),
+                try o.builder.vectorValue(llvm_operand_mask_ty, mask_elems),
+                "",
+            );
+        };
+        const operand_b: Builder.Value = extend: {
+            const raw = try fg.resolveInst(unwrapped.operand_b);
+            if (operand_b_len == operand_len) break :extend raw;
+            // Extend with a `shufflevector`, with a mask `<0, 1, ..., n, poison, poison, ..., poison>`
+            const mask_elems = llvm_elem_buf[0..operand_len];
+            for (mask_elems[0..operand_b_len], 0..) |*llvm_elem, elem_idx| {
+                llvm_elem.* = try o.builder.intConst(.i32, elem_idx);
+            }
+            @memset(mask_elems[operand_b_len..], llvm_poison_mask_elem);
+            const llvm_this_operand_ty = try o.builder.vectorType(.normal, operand_b_len, llvm_elem_ty);
+            break :extend try fg.wip.shuffleVector(
+                raw,
+                try o.builder.poisonValue(llvm_this_operand_ty),
+                try o.builder.vectorValue(llvm_operand_mask_ty, mask_elems),
+                "",
+            );
+        };
+
+        // `operand_a` and `operand_b` now have the same length (we've extended the shorter one with
+        // an initial shuffle if necessary). Now for the easy bit.
+
+        const mask_elems = llvm_elem_buf[0..mask.len];
+        for (mask, mask_elems) |mask_elem, *llvm_mask_elem| {
+            llvm_mask_elem.* = switch (mask_elem.unwrap()) {
+                .a_elem => |idx| try o.builder.intConst(.i32, idx),
+                .b_elem => |idx| try o.builder.intConst(.i32, operand_len + idx),
+                .undef => llvm_poison_mask_elem,
+            };
+        }
+        return fg.wip.shuffleVector(
            operand_a,
            operand_b,
            try o.builder.vectorValue(llvm_mask_ty, mask_elems),
            "",
         );
-        return self.wip.shuffleVector(a, b, llvm_mask_value, "");
     }
 
     /// Reduce a vector by repeatedly applying `llvm_fn` to produce an accumulated result.
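
For illustration, here is a minimal sketch — hypothetical values, not part of the patch — of the two-`shufflevector` strategy described in the comments above, for a `@shuffle` whose second operand is comptime-known and whose runtime operand is shorter than the mask:

    // Zig source: `a` is runtime-known; the second operand is comptime-known.
    var a: @Vector(2, u8) = .{ 10, 20 };
    _ = &a;
    const r = @shuffle(u8, a, @Vector(2, u8){ 30, 40 }, @Vector(4, i32){ 0, -1, 1, -2 });
    // r == .{ 10, 30, 20, 40 }
    //
    // Sketch of the lowering (LLVM IR): the first shuffle positions the runtime
    // elements, leaving poison in the comptime slots...
    //   %rt = shufflevector <2 x i8> %a, <2 x i8> poison, <4 x i32> <i32 0, i32 poison, i32 1, i32 poison>
    // ...and the second merges in a constant vector holding the comptime elements
    // (mask indices >= 4 select from the second operand):
    //   %r = shufflevector <4 x i8> %rt, <4 x i8> <i8 poison, i8 30, i8 poison, i8 40>, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
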
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 5041634a75..f83c6979ff 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -28,6 +28,15 @@ const SpvAssembler = @import("spirv/Assembler.zig");
 
 const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
 
+pub fn legalizeFeatures(_: *const std.Target) *const Air.Legalize.Features {
+    return comptime &.initMany(&.{
+        .expand_intcast_safe,
+        .expand_add_safe,
+        .expand_sub_safe,
+        .expand_mul_safe,
+    });
+}
+
 pub const zig_call_abi_ver = 3;
 pub const big_int_bits = 32;
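
For context — an editorial sketch, not part of the patch: the `expand_*_safe` features ask the new `Air.Legalize` pass to rewrite safety-checked operations into plain AIR that this backend already supports. Conceptually, with `.expand_intcast_safe` enabled, a checked cast like this:

    var x: u32 = 300; // runtime-known
    _ = &x;
    const y: u8 = @intCast(x);
    _ = y;
    // ...is rewritten into an explicit range check plus an unchecked cast,
    // roughly equivalent to:
    //   if (x > std.math.maxInt(u8)) @panic("integer does not fit in destination type");
    //   const y: u8 = @truncate(x);
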
@@ -3243,7 +3252,8 @@
 
             .splat => try self.airSplat(inst),
             .reduce, .reduce_optimized => try self.airReduce(inst),
-            .shuffle => try self.airShuffle(inst),
+            .shuffle_one => try self.airShuffleOne(inst),
+            .shuffle_two => try self.airShuffleTwo(inst),
 
             .ptr_add => try self.airPtrAdd(inst),
             .ptr_sub => try self.airPtrSub(inst),
@@ -3380,6 +3390,10 @@
         const zcu = self.pt.zcu;
         const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
+        if (self.typeOf(bin_op.lhs).isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu)) {
+            return self.fail("vector shift with scalar rhs", .{});
+        }
+
         const base = try self.temporary(bin_op.lhs);
         const shift = try self.temporary(bin_op.rhs);
 
@@ -3866,6 +3880,10 @@
         const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
         const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
+        if (self.typeOf(extra.lhs).isVector(zcu) and !self.typeOf(extra.rhs).isVector(zcu)) {
+            return self.fail("vector shift with scalar rhs", .{});
+        }
+
         const base = try self.temporary(extra.lhs);
         const shift = try self.temporary(extra.rhs);
 
@@ -4030,40 +4048,57 @@
         return result_id;
     }
 
-    fn airShuffle(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
-        const pt = self.pt;
+    fn airShuffleOne(ng: *NavGen, inst: Air.Inst.Index) !?IdRef {
+        const pt = ng.pt;
         const zcu = pt.zcu;
-        const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-        const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
-        const a = try self.resolve(extra.a);
-        const b = try self.resolve(extra.b);
-        const mask = Value.fromInterned(extra.mask);
+        const gpa = zcu.gpa;
 
-        // Note: number of components in the result, a, and b may differ.
-        const result_ty = self.typeOfIndex(inst);
-        const scalar_ty = result_ty.scalarType(zcu);
-        const scalar_ty_id = try self.resolveType(scalar_ty, .direct);
+        const unwrapped = ng.air.unwrapShuffleOne(zcu, inst);
+        const mask = unwrapped.mask;
+        const result_ty = unwrapped.result_ty;
+        const elem_ty = result_ty.childType(zcu);
+        const operand = try ng.resolve(unwrapped.operand);
 
-        const constituents = try self.gpa.alloc(IdRef, result_ty.vectorLen(zcu));
-        defer self.gpa.free(constituents);
+        const constituents = try gpa.alloc(IdRef, mask.len);
+        defer gpa.free(constituents);
 
-        for (constituents, 0..) |*id, i| {
-            const elem = try mask.elemValue(pt, i);
-            if (elem.isUndef(zcu)) {
-                id.* = try self.spv.constUndef(scalar_ty_id);
-                continue;
-            }
-
-            const index = elem.toSignedInt(zcu);
-            if (index >= 0) {
-                id.* = try self.extractVectorComponent(scalar_ty, a, @intCast(index));
-            } else {
-                id.* = try self.extractVectorComponent(scalar_ty, b, @intCast(~index));
-            }
+        for (constituents, mask) |*id, mask_elem| {
+            id.* = switch (mask_elem.unwrap()) {
+                .elem => |idx| try ng.extractVectorComponent(elem_ty, operand, idx),
+                .value => |val| try ng.constant(elem_ty, .fromInterned(val), .direct),
+            };
         }
 
-        const result_ty_id = try self.resolveType(result_ty, .direct);
-        return try self.constructComposite(result_ty_id, constituents);
+        const result_ty_id = try ng.resolveType(result_ty, .direct);
+        return try ng.constructComposite(result_ty_id, constituents);
+    }
+
+    fn airShuffleTwo(ng: *NavGen, inst: Air.Inst.Index) !?IdRef {
+        const pt = ng.pt;
+        const zcu = pt.zcu;
+        const gpa = zcu.gpa;
+
+        const unwrapped = ng.air.unwrapShuffleTwo(zcu, inst);
+        const mask = unwrapped.mask;
+        const result_ty = unwrapped.result_ty;
+        const elem_ty = result_ty.childType(zcu);
+        const elem_ty_id = try ng.resolveType(elem_ty, .direct);
+        const operand_a = try ng.resolve(unwrapped.operand_a);
+        const operand_b = try ng.resolve(unwrapped.operand_b);
+
+        const constituents = try gpa.alloc(IdRef, mask.len);
+        defer gpa.free(constituents);
+
+        for (constituents, mask) |*id, mask_elem| {
+            id.* = switch (mask_elem.unwrap()) {
+                .a_elem => |idx| try ng.extractVectorComponent(elem_ty, operand_a, idx),
+                .b_elem => |idx| try ng.extractVectorComponent(elem_ty, operand_b, idx),
+                .undef => try ng.spv.constUndef(elem_ty_id),
+            };
+        }
+
+        const result_ty_id = try ng.resolveType(result_ty, .direct);
+        return try ng.constructComposite(result_ty_id, constituents);
     }
 
     fn indicesToIds(self: *NavGen, indices: []const u32) ![]IdRef {
diff --git a/src/dev.zig b/src/dev.zig
index 473cb4a8d0..1dc8264ebc 100644
--- a/src/dev.zig
+++ b/src/dev.zig
@@ -1,5 +1,8 @@
 pub const Env = enum {
     /// zig1 features
+    /// - `-ofmt=c` only
+    /// - `-OReleaseFast` or `-OReleaseSmall` only
+    /// - no `@setRuntimeSafety(true)`
     bootstrap,
 
     /// zig2 features
@@ -67,6 +70,7 @@ pub const Env = enum {
             .incremental,
             .ast_gen,
             .sema,
+            .legalize,
             .llvm_backend,
             .c_backend,
             .wasm_backend,
@@ -144,6 +148,7 @@ pub const Env = enum {
             .build_command,
             .stdio_listen,
             .incremental,
+            .legalize,
             .x86_64_backend,
             .elf_linker,
             => true,
@@ -222,6 +227,7 @@ pub const Feature = enum {
     incremental,
     ast_gen,
     sema,
+    legalize,
     llvm_backend,
     c_backend,
diff --git a/src/mutable_value.zig b/src/mutable_value.zig
index d894adfa33..f1010601dc 100644
--- a/src/mutable_value.zig
+++ b/src/mutable_value.zig
@@ -260,7 +260,7 @@
                 const ptr = try arena.create(MutableValue);
                 const len = try arena.create(MutableValue);
                 ptr.* = .{ .interned = try pt.intern(.{ .undef = ip.slicePtrType(ty_ip) }) };
-                len.* = .{ .interned = try pt.intern(.{ .undef = .usize_type }) };
+                len.* = .{ .interned = .undef_usize };
                 mv.* = .{ .slice = .{
                     .ty = ty_ip,
                     .ptr = ptr,
@@ -464,7 +464,7 @@
             return switch (field_idx) {
                 Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(pt.zcu).toIntern() },
                 Value.slice_len_index => .{ .interned = switch (pt.zcu.intern_pool.indexToKey(ip_index)) {
-                    .undef => try pt.intern(.{ .undef = .usize_type }),
+                    .undef => .undef_usize,
                     .slice => |s| s.len,
                     else => unreachable,
                 } },
diff --git a/src/print_air.zig b/src/print_air.zig
index 0f658dcd9f..6085adbcdc 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -315,7 +315,8 @@
             .wasm_memory_grow => try w.writeWasmMemoryGrow(s, inst),
             .mul_add => try w.writeMulAdd(s, inst),
             .select => try w.writeSelect(s, inst),
-            .shuffle => try w.writeShuffle(s, inst),
+            .shuffle_one => try w.writeShuffleOne(s, inst),
+            .shuffle_two => try w.writeShuffleTwo(s, inst),
             .reduce, .reduce_optimized => try w.writeReduce(s, inst),
             .cmp_vector, .cmp_vector_optimized => try w.writeCmpVector(s, inst),
             .vector_store_elem => try w.writeVectorStoreElem(s, inst),
@@ -499,14 +500,39 @@
         try w.writeOperand(s, inst, 2, pl_op.operand);
     }
 
-    fn writeShuffle(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
-        const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-        const extra = w.air.extraData(Air.Shuffle, ty_pl.payload).data;
-
-        try w.writeOperand(s, inst, 0, extra.a);
+    fn writeShuffleOne(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+        const unwrapped = w.air.unwrapShuffleOne(w.pt.zcu, inst);
+        try w.writeType(s, unwrapped.result_ty);
         try s.writeAll(", ");
-        try w.writeOperand(s, inst, 1, extra.b);
-        try s.print(", mask {d}, len {d}", .{ extra.mask, extra.mask_len });
+        try w.writeOperand(s, inst, 0, unwrapped.operand);
+        try s.writeAll(", [");
+        for (unwrapped.mask, 0..) |mask_elem, mask_idx| {
+            if (mask_idx > 0) try s.writeAll(", ");
+            switch (mask_elem.unwrap()) {
+                .elem => |idx| try s.print("elem {d}", .{idx}),
+                .value => |val| try s.print("val {}", .{Value.fromInterned(val).fmtValue(w.pt)}),
+            }
+        }
+        try s.writeByte(']');
+    }
+
+    fn writeShuffleTwo(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+        const unwrapped = w.air.unwrapShuffleTwo(w.pt.zcu, inst);
+        try w.writeType(s, unwrapped.result_ty);
+        try s.writeAll(", ");
+        try w.writeOperand(s, inst, 0, unwrapped.operand_a);
+        try s.writeAll(", ");
+        try w.writeOperand(s, inst, 1, unwrapped.operand_b);
+        try s.writeAll(", [");
+        for (unwrapped.mask, 0..) |mask_elem, mask_idx| {
+            if (mask_idx > 0) try s.writeAll(", ");
+            switch (mask_elem.unwrap()) {
+                .a_elem => |idx| try s.print("a_elem {d}", .{idx}),
+                .b_elem => |idx| try s.print("b_elem {d}", .{idx}),
+                .undef => try s.writeAll("undef"),
+            }
+        }
+        try s.writeByte(']');
     }
 
     fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
diff --git a/src/target.zig b/src/target.zig
index 6119b002a4..4b0cc20bda 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -842,17 +842,9 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, compt
             .stage2_c, .stage2_llvm, .stage2_x86_64 => true,
             else => false,
         },
-        .safety_checked_instructions => switch (backend) {
-            .stage2_llvm => true,
-            else => false,
-        },
         .separate_thread => switch (backend) {
             .stage2_llvm => false,
             else => true,
         },
-        .all_vector_instructions => switch (backend) {
-            .stage2_x86_64 => true,
-            else => false,
-        },
     };
 }
diff --git a/stage1/zig.h b/stage1/zig.h
index 2d9e7a5626..229d6a7973 100644
--- a/stage1/zig.h
+++ b/stage1/zig.h
@@ -481,6 +481,7 @@
 zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t);
 zig_extern void *memset (void *, int, size_t);
+zig_extern void *memmove (void *, void const *, size_t);
 
 /* ================ Bool and 8/16/32/64-bit Integer Support ================= */
@@ -1114,14 +1115,15 @@ static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
     \
     static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
         uint##w##_t res; \
-        if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \
-        return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \
+        if (rhs < bits && !zig_shlo_u##w(&res, lhs, rhs, bits)) return res; \
+        return lhs == INT##w##_C(0) ? INT##w##_C(0) : zig_maxInt_u(w, bits); \
     } \
     \
-    static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
+    static inline int##w##_t zig_shls_i##w(int##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
         int##w##_t res; \
-        if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \
-        return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
+        if (rhs < bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \
+        return lhs == INT##w##_C(0) ? INT##w##_C(0) : \
+            lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
     } \
     \
     static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
@@ -1850,15 +1852,23 @@ static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8
 
 static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
     zig_u128 res;
-    if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0))
-        return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs;
-    return zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits) ? zig_maxInt_u(128, bits) : res;
+    if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits)) return res;
+    switch (zig_cmp_u128(lhs, zig_make_u128(0, 0))) {
+        case 0: return zig_make_u128(0, 0);
+        case 1: return zig_maxInt_u(128, bits);
+        default: zig_unreachable();
+    }
 }
 
-static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
+static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_u128 rhs, uint8_t bits) {
     zig_i128 res;
-    if (zig_cmp_u128(zig_bitCast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res;
-    return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
+    if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits)) return res;
+    switch (zig_cmp_i128(lhs, zig_make_i128(0, 0))) {
+        case -1: return zig_minInt_i(128, bits);
+        case 0: return zig_make_i128(0, 0);
+        case 1: return zig_maxInt_i(128, bits);
+        default: zig_unreachable();
+    }
 }
 
 static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index 2eb55170d0..bb14b1b213 100644
Binary files a/stage1/zig1.wasm and b/stage1/zig1.wasm differ
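
For illustration — a sketch, not part of the patch: the `zig_shls_u##w`/`zig_shls_i##w` helpers above implement Zig's saturating left shift `<<|` for the C backend. The rewrite makes a zero operand stay zero even for oversized shift amounts (previously the signed helpers saturated in that case) and widens the signed helpers' `rhs` parameter to the unsigned type. In Zig terms:

    const std = @import("std");

    test "saturating shift left semantics" {
        var amt: u8 = 10; // runtime-known, exceeds the bit width of u8/i8
        _ = &amt;
        try std.testing.expectEqual(@as(u8, 255), @as(u8, 1) <<| amt); // saturates to maxInt(u8)
        try std.testing.expectEqual(@as(i8, -128), @as(i8, -1) <<| amt); // saturates to minInt(i8)
        try std.testing.expectEqual(@as(i8, 0), @as(i8, 0) <<| amt); // zero stays zero (the fixed case)
    }
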
f80" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const FloatT = extern struct { f: f80, x: u128 align(16) }; const x: FloatT = .{ .f = 0.5, .x = 123 }; diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig index 965a820141..3a34a5396e 100644 --- a/test/behavior/bitreverse.zig +++ b/test/behavior/bitreverse.zig @@ -12,7 +12,6 @@ test "@bitReverse" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime testBitReverse(); @@ -123,7 +122,6 @@ fn vector8() !void { test "bitReverse vectors u8" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -144,7 +142,6 @@ fn vector16() !void { test "bitReverse vectors u16" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -165,7 +162,6 @@ fn vector24() !void { test "bitReverse vectors u24" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig index 0c6e655b25..9920fd22fb 100644 --- a/test/behavior/byteswap.zig +++ b/test/behavior/byteswap.zig @@ -39,7 +39,6 @@ test "@byteSwap integers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const ByteSwapIntTest = struct { fn run() !void { @@ -95,7 +94,6 @@ fn vector8() !void { test "@byteSwap vectors u8" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -116,7 +114,6 @@ fn vector16() !void { test "@byteSwap vectors u16" { if (builtin.zig_backend == 
.stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -137,7 +134,6 @@ fn vector24() !void { test "@byteSwap vectors u24" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index b2faf3e292..ea21e1d6d9 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -617,7 +617,6 @@ test "@intCast on vector" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest; @@ -2520,7 +2519,6 @@ test "@ptrFromInt on vector" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const S = struct { @@ -2592,7 +2590,6 @@ test "@intFromFloat on vector" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -2693,7 +2690,6 @@ test "@intCast vector of signed integer" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest; diff --git a/test/behavior/extern.zig b/test/behavior/extern.zig index 48b82fdf58..52390f71c4 100644 --- a/test/behavior/extern.zig +++ b/test/behavior/extern.zig @@ -5,7 +5,6 @@ const expect = std.testing.expect; test "anyopaque extern symbol" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) 
return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const a = @extern(*anyopaque, .{ .name = "a_mystery_symbol" }); diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index bbd376c784..9baa42880b 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -14,9 +14,11 @@ fn epsForType(comptime T: type) T { } test "add f16" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; + try testAdd(f16); try comptime testAdd(f16); } @@ -123,10 +125,12 @@ fn testMul(comptime T: type) !void { test "cmp f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; + try testCmp(f16); try comptime testCmp(f16); } @@ -135,7 +139,6 @@ test "cmp f32" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testCmp(f32); try comptime testCmp(f32); @@ -144,7 +147,6 @@ test "cmp f32" { test "cmp f64" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 try testCmp(f64); @@ -340,9 +342,11 @@ test "different sized float comparisons" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; + try testDifferentSizedFloatComparisons(); try comptime testDifferentSizedFloatComparisons(); } @@ -388,10 +392,12 @@ test "@sqrt f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; + try testSqrt(f16); try comptime testSqrt(f16); } @@ -400,7 +406,6 @@ test "@sqrt f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; try testSqrt(f32); @@ -1132,9 +1137,11 @@ test "@abs f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; + try testFabs(f16); try comptime testFabs(f16); } @@ -1266,9 +1273,11 @@ test "@floor f32/f64" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; + try testFloor(f32); try comptime testFloor(f32); try testFloor(f64); @@ -1332,7 +1341,9 @@ test "@floor with vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; try testFloorWithVectors(); try comptime testFloorWithVectors(); @@ -1363,9 +1374,11 @@ test "@ceil f32/f64" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != 
.macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; + try testCeil(f32); try comptime testCeil(f32); try testCeil(f64); @@ -1429,7 +1442,9 @@ test "@ceil with vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; try testCeilWithVectors(); try comptime testCeilWithVectors(); @@ -1460,9 +1475,11 @@ test "@trunc f32/f64" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; + try testTrunc(f32); try comptime testTrunc(f32); try testTrunc(f64); @@ -1526,7 +1543,9 @@ test "@trunc with vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; try testTruncWithVectors(); try comptime testTruncWithVectors(); @@ -1546,9 +1565,11 @@ test "neg f16" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; + if (builtin.os.tag == .freebsd) { // TODO file issue to track this failure return error.SkipZigTest; diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index cff4cf567e..bef323e4ae 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -429,7 +429,6 @@ test "implicit cast function to function ptr" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S1 = struct { export fn someFunctionThatReturnsAValue() c_int { diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 5cb41aa228..8eec0a4df9 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -85,7 +85,6 @@ test "@clz big ints" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testClzBigInts(); @@ -103,7 +102,6 @@ fn testOneClz(comptime T: type, x: T) u32 { test "@clz vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -173,7 +171,6 @@ fn testOneCtz(comptime T: type, x: T) u32 { } test "@ctz 128-bit integers" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -193,7 +190,6 @@ fn testCtz128() !void { test "@ctz vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -475,10 +471,12 @@ test "division" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; + try testIntDivision(); try comptime testIntDivision(); @@ -1623,10 +1621,10 @@ test "vector integer addition" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; 
// TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1694,9 +1692,6 @@ test "vector comparison" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and - !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { var a: @Vector(6, i32) = [_]i32{ 1, 3, -1, 5, 7, 9 }; @@ -1785,7 +1780,6 @@ test "mod lazy values" { test "@clz works on both vector and scalar inputs" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1807,7 +1801,6 @@ test "runtime comparison to NaN is comptime-known" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 @@ -1838,7 +1831,6 @@ test "runtime int comparison to inf is comptime-known" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 @@ -1936,7 +1928,9 @@ test "float vector division of comptime zero by runtime nan is nan" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; const ct_zero: @Vector(1, f32) = .{0}; var rt_nan: @Vector(1, f32) = .{math.nan(f32)}; diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index 953dca0ca2..911a221fe5 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -34,7 +34,6 @@ test "@max on 
vectors" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -90,7 +89,6 @@ test "@min for vectors" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -206,7 +204,6 @@ test "@min/@max notices vector bounds" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; var x: @Vector(2, u16) = .{ 140, 40 }; const y: @Vector(2, u64) = .{ 5, 100 }; @@ -260,7 +257,6 @@ test "@min/@max notices bounds from vector types" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; var x: @Vector(2, u16) = .{ 30, 67 }; var y: @Vector(2, u32) = .{ 20, 500 }; @@ -303,7 +299,6 @@ test "@min/@max notices bounds from vector types when element of comptime-known if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; var x: @Vector(2, u32) = .{ 1_000_000, 12345 }; _ = &x; @@ -375,7 +370,6 @@ test "@min/@max with runtime vectors of signed and unsigned integers of same siz if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn min(a: @Vector(2, i32), b: @Vector(2, u32)) @Vector(2, i32) { diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig index a9a366c3de..efe2e52073 100644 --- a/test/behavior/muladd.zig +++ b/test/behavior/muladd.zig @@ -6,10 +6,12 @@ test "@mulAdd" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if 
(builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest; + try comptime testMulAdd(); try testMulAdd(); } @@ -137,10 +139,12 @@ test "vector f32" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest; + try comptime vector32(); try vector32(); } @@ -163,10 +167,12 @@ test "vector f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest; + try comptime vector64(); try vector64(); } diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index be4a73be4f..779157e917 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -1307,6 +1307,17 @@ test "packed struct equality" { comptime try S.doTest(x, y); } +test "packed struct equality ignores padding bits" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + + const S = packed struct { b: bool }; + var s: S = undefined; + s.b = true; + try std.testing.expect(s != S{ .b = false }); + try std.testing.expect(s == S{ .b = true }); +} + test "packed struct with signed field" { var s: packed struct { a: i2, diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig index 1bf5f96515..0a79933970 100644 --- a/test/behavior/popcount.zig +++ b/test/behavior/popcount.zig @@ -77,7 +77,6 @@ fn testPopCountIntegers() !void { test "@popCount vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/select.zig b/test/behavior/select.zig index b64ce8c0b5..8950d512bf 100644 --- a/test/behavior/select.zig +++ b/test/behavior/select.zig @@ -41,8 +41,6 @@ test "@select arrays" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if 
(builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and - !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest; try comptime selectArrays(); try selectArrays(); @@ -70,7 +68,6 @@ fn selectArrays() !void { test "@select compare result" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest; const S = struct { diff --git a/test/behavior/shuffle.zig b/test/behavior/shuffle.zig index 125a0ddf7a..12e613d3d5 100644 --- a/test/behavior/shuffle.zig +++ b/test/behavior/shuffle.zig @@ -10,8 +10,6 @@ test "@shuffle int" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and - !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .ssse3)) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -53,7 +51,6 @@ test "@shuffle int" { test "@shuffle int strange sizes" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -136,7 +133,6 @@ fn testShuffle( test "@shuffle bool 1" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -160,7 +156,6 @@ test "@shuffle bool 1" { test "@shuffle bool 2" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/union.zig b/test/behavior/union.zig index e4e4f95347..478fe999b7 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -282,6 +282,7 @@ test "cast union to tag type of union" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testCastUnionToTag(); try comptime testCastUnionToTag(); diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 129a551fb8..9bd37dd4e5 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -31,7 +31,6 @@ test "vector wrap operators" { if 
(builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -76,12 +75,12 @@ test "vector bin compares with mem.eql" { test "vector int operators" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -249,9 +248,11 @@ test "array to vector with element type coercion" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; + const S = struct { fn doTheTest() !void { var foo: f16 = 3.14; @@ -286,11 +287,11 @@ test "peer type resolution with coercible element types" { test "tuple to vector" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -652,7 +653,6 @@ test "vector division operators" { test "vector bitwise not operator" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -684,12 +684,12 @@ test "vector bitwise not operator" { test "vector shift operators" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTestShift(x: anytype, y: anytype) !void { @@ -908,8 +908,6 @@ test "mask parameter of @shuffle is comptime scope" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and - !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .ssse3)) return error.SkipZigTest; const __v4hi = @Vector(4, i16); var v4_a = __v4hi{ 1, 2, 3, 4 }; @@ -934,7 +932,6 @@ test "saturating add" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -969,7 +966,6 @@ test "saturating subtraction" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -989,7 +985,6 @@ test "saturating subtraction" { test "saturating multiplication" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1018,12 +1013,12 @@ test "saturating multiplication" { test "saturating shift-left" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1043,12 +1038,12 @@ test "saturating shift-left" { test "multiplication-assignment operator with an array operand" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; 
// TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1065,7 +1060,6 @@ test "multiplication-assignment operator with an array operand" { test "@addWithOverflow" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1116,7 +1110,6 @@ test "@addWithOverflow" { test "@subWithOverflow" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1151,7 +1144,6 @@ test "@subWithOverflow" { test "@mulWithOverflow" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1175,7 +1167,6 @@ test "@mulWithOverflow" { test "@shlWithOverflow" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1314,7 +1305,7 @@ test "zero multiplicand" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const zeros = @Vector(2, u32){ 0.0, 0.0 }; var ones = @Vector(2, u32){ 1.0, 1.0 }; @@ -1362,7 +1353,6 @@ test "array operands to shuffle are coerced to vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const mask = [5]i32{ -1, 0, 1, 2, 3 }; @@ -1469,7 +1459,6 @@ test "compare vectors with different element types" { if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; diff --git a/test/behavior/x86_64/binary.zig b/test/behavior/x86_64/binary.zig index 63bd3e4c99..7fd0c9575d 100644 --- a/test/behavior/x86_64/binary.zig +++ b/test/behavior/x86_64/binary.zig @@ -1,5 +1,7 @@ const AddOneBit = math.AddOneBit; +const AsSignedness = math.AsSignedness; const cast = math.cast; +const ChangeScalar = math.ChangeScalar; const checkExpected = math.checkExpected; const Compare = math.Compare; const DoubleBits = math.DoubleBits; @@ -13,6 +15,7 @@ const math = @import("math.zig"); const nan = math.nan; const Scalar = math.Scalar; const sign = math.sign; +const splat = math.splat; const Sse = math.Sse; const tmin = math.tmin; @@ -5141,6 +5144,7 @@ inline fn mulSat(comptime Type: type, lhs: Type, rhs: Type) Type { test mulSat { const test_mul_sat = binary(mulSat, .{}); try test_mul_sat.testInts(); + try test_mul_sat.testIntVectors(); } inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs * rhs) { @@ -5240,38 +5244,42 @@ test min { try test_min.testFloatVectors(); } -inline fn addWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } { +inline fn addWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, ChangeScalar(Type, u1) } { return @addWithOverflow(lhs, rhs); } test addWithOverflow { const test_add_with_overflow = binary(addWithOverflow, .{}); try test_add_with_overflow.testInts(); + try test_add_with_overflow.testIntVectors(); } -inline fn subWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } { +inline fn subWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, ChangeScalar(Type, u1) } { return @subWithOverflow(lhs, rhs); } test subWithOverflow { const test_sub_with_overflow = binary(subWithOverflow, .{}); try test_sub_with_overflow.testInts(); + try test_sub_with_overflow.testIntVectors(); } -inline fn mulWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } { +inline fn mulWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, ChangeScalar(Type, u1) } { return @mulWithOverflow(lhs, rhs); } test mulWithOverflow { const test_mul_with_overflow = binary(mulWithOverflow, .{}); try test_mul_with_overflow.testInts(); + try test_mul_with_overflow.testIntVectors(); } -inline fn shlWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } { - const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs); +inline fn shlWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, ChangeScalar(Type, u1) } { + const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs); const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs); - return @shlWithOverflow(lhs, if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs); + return @shlWithOverflow(lhs, if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs); } test shlWithOverflow { const test_shl_with_overflow = binary(shlWithOverflow, .{}); try test_shl_with_overflow.testInts(); + try 
test_shl_with_overflow.testIntVectors(); } inline fn equal(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs == rhs) { @@ -5280,7 +5288,9 @@ inline fn equal(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs == rhs) { test equal { const test_equal = binary(equal, .{}); try test_equal.testInts(); + try test_equal.testIntVectors(); try test_equal.testFloats(); + try test_equal.testFloatVectors(); } inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs) { @@ -5289,7 +5299,9 @@ inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs test notEqual { const test_not_equal = binary(notEqual, .{}); try test_not_equal.testInts(); + try test_not_equal.testIntVectors(); try test_not_equal.testFloats(); + try test_not_equal.testFloatVectors(); } inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs) { @@ -5298,7 +5310,9 @@ inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs) test lessThan { const test_less_than = binary(lessThan, .{}); try test_less_than.testInts(); + try test_less_than.testIntVectors(); try test_less_than.testFloats(); + try test_less_than.testFloatVectors(); } inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs <= rhs) { @@ -5307,7 +5321,9 @@ inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs test lessThanOrEqual { const test_less_than_or_equal = binary(lessThanOrEqual, .{}); try test_less_than_or_equal.testInts(); + try test_less_than_or_equal.testIntVectors(); try test_less_than_or_equal.testFloats(); + try test_less_than_or_equal.testFloatVectors(); } inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > rhs) { @@ -5316,7 +5332,9 @@ inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > r test greaterThan { const test_greater_than = binary(greaterThan, .{}); try test_greater_than.testInts(); + try test_greater_than.testIntVectors(); try test_greater_than.testFloats(); + try test_greater_than.testFloatVectors(); } inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs >= rhs) { @@ -5325,7 +5343,9 @@ inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf( test greaterThanOrEqual { const test_greater_than_or_equal = binary(greaterThanOrEqual, .{}); try test_greater_than_or_equal.testInts(); + try test_greater_than_or_equal.testIntVectors(); try test_greater_than_or_equal.testFloats(); + try test_greater_than_or_equal.testFloatVectors(); } inline fn bitAnd(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs & rhs) { @@ -5347,54 +5367,57 @@ test bitOr { } inline fn shr(comptime Type: type, lhs: Type, rhs: Type) Type { - const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs); + const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs); const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs); - return lhs >> if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs; + return lhs >> if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs; } test shr { const test_shr = binary(shr, .{}); try test_shr.testInts(); + try test_shr.testIntVectors(); } inline fn shrExact(comptime Type: type, lhs: Type, rhs: Type) Type { - const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs); + const 
bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs); const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs); - const final_rhs = if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs; + const final_rhs = if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs; return @shrExact(lhs >> final_rhs << final_rhs, final_rhs); } test shrExact { const test_shr_exact = binary(shrExact, .{}); try test_shr_exact.testInts(); + try test_shr_exact.testIntVectors(); } inline fn shl(comptime Type: type, lhs: Type, rhs: Type) Type { - const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs); + const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs); const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs); - return lhs << if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs; + return lhs << if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs; } test shl { const test_shl = binary(shl, .{}); try test_shl.testInts(); + try test_shl.testIntVectors(); } inline fn shlExactUnsafe(comptime Type: type, lhs: Type, rhs: Type) Type { @setRuntimeSafety(false); - const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs); + const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs); const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs); - const final_rhs = if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs; + const final_rhs = if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs; return @shlExact(lhs << final_rhs >> final_rhs, final_rhs); } test shlExactUnsafe { const test_shl_exact_unsafe = binary(shlExactUnsafe, .{}); try test_shl_exact_unsafe.testInts(); + try test_shl_exact_unsafe.testIntVectors(); } inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type { // workaround https://github.com/ziglang/zig/issues/23034 if (@inComptime()) { // workaround https://github.com/ziglang/zig/issues/23139 - //return lhs <<| @min(@abs(rhs), imax(u64)); - return lhs <<| @min(@abs(rhs), @as(u64, imax(u64))); + return lhs <<| @min(@abs(rhs), splat(ChangeScalar(Type, u64), imax(u64))); } // workaround https://github.com/ziglang/zig/issues/23033 @setRuntimeSafety(false); @@ -5403,6 +5426,7 @@ inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type { test shlSat { const test_shl_sat = binary(shlSat, .{}); try test_shl_sat.testInts(); + try test_shl_sat.testIntVectors(); } inline fn bitXor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs ^ rhs) { diff --git a/test/behavior/x86_64/math.zig b/test/behavior/x86_64/math.zig index bc2b417620..b3ace13ab6 100644 --- a/test/behavior/x86_64/math.zig +++ b/test/behavior/x86_64/math.zig @@ -8,8 +8,6 @@ pub const fmin = math.floatMin; pub const imax = math.maxInt; pub const imin = math.minInt; pub const inf = math.inf; -pub const Log2Int = math.Log2Int; -pub const Log2IntCeil = math.Log2IntCeil; pub const nan = math.nan; pub const next = math.nextAfter; pub const tmin = math.floatTrueMin; @@ -30,38 +28,44 @@ pub fn Scalar(comptime Type: type) type { .vector => |info| info.child, }; } +pub fn ChangeScalar(comptime Type: type, comptime NewScalar: type) type { + return 
switch (@typeInfo(Type)) { + else => NewScalar, + .vector => |vector| @Vector(vector.len, NewScalar), + }; +} +pub fn AsSignedness(comptime Type: type, comptime signedness: std.builtin.Signedness) type { + return ChangeScalar(Type, @Type(.{ .int = .{ + .signedness = signedness, + .bits = @typeInfo(Scalar(Type)).int.bits, + } })); +} pub fn AddOneBit(comptime Type: type) type { - const ResultScalar = switch (@typeInfo(Scalar(Type))) { + return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) { .int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = 1 + int.bits } }), .float => Scalar(Type), else => @compileError(@typeName(Type)), - }; - return switch (@typeInfo(Type)) { - else => ResultScalar, - .vector => |vector| @Vector(vector.len, ResultScalar), - }; + }); } pub fn DoubleBits(comptime Type: type) type { - const ResultScalar = switch (@typeInfo(Scalar(Type))) { + return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) { .int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = int.bits * 2 } }), .float => Scalar(Type), else => @compileError(@typeName(Type)), - }; - return switch (@typeInfo(Type)) { - else => ResultScalar, - .vector => |vector| @Vector(vector.len, ResultScalar), - }; + }); } pub fn RoundBitsUp(comptime Type: type, comptime multiple: u16) type { - const ResultScalar = switch (@typeInfo(Scalar(Type))) { + return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) { .int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = std.mem.alignForward(u16, int.bits, multiple) } }), .float => Scalar(Type), else => @compileError(@typeName(Type)), - }; - return switch (@typeInfo(Type)) { - else => ResultScalar, - .vector => |vector| @Vector(vector.len, ResultScalar), - }; + }); +} +pub fn Log2Int(comptime Type: type) type { + return ChangeScalar(Type, math.Log2Int(Scalar(Type))); +} +pub fn Log2IntCeil(comptime Type: type) type { + return ChangeScalar(Type, math.Log2IntCeil(Scalar(Type))); } // inline to avoid a runtime `@splat` pub inline fn splat(comptime Type: type, scalar: Scalar(Type)) Type { @@ -78,18 +82,12 @@ inline fn select(cond: anytype, lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) { else => @compileError(@typeName(@TypeOf(cond))), }; } -pub fn sign(rhs: anytype) switch (@typeInfo(@TypeOf(rhs))) { - else => bool, - .vector => |vector| @Vector(vector.len, bool), -} { +pub fn sign(rhs: anytype) ChangeScalar(@TypeOf(rhs), bool) { const ScalarInt = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Scalar(@TypeOf(rhs))), } }); - const VectorInt = switch (@typeInfo(@TypeOf(rhs))) { - else => ScalarInt, - .vector => |vector| @Vector(vector.len, ScalarInt), - }; + const VectorInt = ChangeScalar(@TypeOf(rhs), ScalarInt); return @as(VectorInt, @bitCast(rhs)) & splat(VectorInt, @as(ScalarInt, 1) << @bitSizeOf(ScalarInt) - 1) != splat(VectorInt, 0); } fn boolAnd(lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) { diff --git a/test/behavior/x86_64/unary.zig b/test/behavior/x86_64/unary.zig index 827d08c4c7..132d17b42d 100644 --- a/test/behavior/x86_64/unary.zig +++ b/test/behavior/x86_64/unary.zig @@ -4828,6 +4828,7 @@ inline fn ctz(comptime Type: type, rhs: Type) @TypeOf(@ctz(rhs)) { test ctz { const test_ctz = unary(ctz, .{}); try test_ctz.testInts(); + try test_ctz.testIntVectors(); } inline fn popCount(comptime Type: type, rhs: Type) @TypeOf(@popCount(rhs)) { @@ -4836,6 +4837,7 @@ inline fn popCount(comptime Type: type, rhs: Type) @TypeOf(@popCount(rhs)) { test popCount { const test_pop_count = unary(popCount, .{}); try 
test_pop_count.testInts(); + try test_pop_count.testIntVectors(); } inline fn byteSwap(comptime Type: type, rhs: Type) RoundBitsUp(Type, 8) { @@ -4844,6 +4846,7 @@ inline fn byteSwap(comptime Type: type, rhs: Type) RoundBitsUp(Type, 8) { test byteSwap { const test_byte_swap = unary(byteSwap, .{}); try test_byte_swap.testInts(); + try test_byte_swap.testIntVectors(); } inline fn bitReverse(comptime Type: type, rhs: Type) @TypeOf(@bitReverse(rhs)) { @@ -4852,6 +4855,7 @@ inline fn bitReverse(comptime Type: type, rhs: Type) @TypeOf(@bitReverse(rhs)) { test bitReverse { const test_bit_reverse = unary(bitReverse, .{}); try test_bit_reverse.testInts(); + try test_bit_reverse.testIntVectors(); } inline fn sqrt(comptime Type: type, rhs: Type) @TypeOf(@sqrt(rhs)) { diff --git a/test/cases/compile_errors/@import_zon_bad_type.zig b/test/cases/compile_errors/@import_zon_bad_type.zig index f20c4efd7c..d1ccfc312c 100644 --- a/test/cases/compile_errors/@import_zon_bad_type.zig +++ b/test/cases/compile_errors/@import_zon_bad_type.zig @@ -117,9 +117,9 @@ export fn testMutablePointer() void { // tmp.zig:37:38: note: imported here // neg_inf.zon:1:1: error: expected type '?u8' // tmp.zig:57:28: note: imported here -// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_518' +// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_522' // tmp.zig:62:39: note: imported here -// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_520' +// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_524' // tmp.zig:67:44: note: imported here -// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_523' +// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_527' // tmp.zig:72:50: note: imported here diff --git a/test/cases/compile_errors/anytype_param_requires_comptime.zig b/test/cases/compile_errors/anytype_param_requires_comptime.zig index 1212d04a00..c0990a9ed3 100644 --- a/test/cases/compile_errors/anytype_param_requires_comptime.zig +++ b/test/cases/compile_errors/anytype_param_requires_comptime.zig @@ -15,6 +15,6 @@ pub export fn entry() void { // error // // :7:25: error: unable to resolve comptime value -// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_492.C' must be comptime-known +// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_496.C' must be comptime-known // :4:16: note: struct requires comptime because of this field // :4:16: note: types are not available at runtime diff --git a/test/cases/compile_errors/bad_panic_call_signature.zig b/test/cases/compile_errors/bad_panic_call_signature.zig index 1af0fdeb17..6d88f1b878 100644 --- a/test/cases/compile_errors/bad_panic_call_signature.zig +++ b/test/cases/compile_errors/bad_panic_call_signature.zig @@ -15,8 +15,7 @@ pub const panic = struct { pub const castToNull = simple_panic.castToNull; pub const incorrectAlignment = simple_panic.incorrectAlignment; pub const invalidErrorCode = simple_panic.invalidErrorCode; - pub const castTruncatedData = simple_panic.castTruncatedData; - pub const negativeToUnsigned = simple_panic.negativeToUnsigned; + pub const integerOutOfBounds = simple_panic.integerOutOfBounds; pub const integerOverflow = simple_panic.integerOverflow; pub const shlOverflow = simple_panic.shlOverflow; pub const shrOverflow = simple_panic.shrOverflow; @@ -27,8 +26,6 @@ pub const panic = struct { pub const shiftRhsTooBig = simple_panic.shiftRhsTooBig; pub const invalidEnumValue = simple_panic.invalidEnumValue; pub 
const forLenMismatch = simple_panic.forLenMismatch; - /// Delete after next zig1.wasm update - pub const memcpyLenMismatch = copyLenMismatch; pub const copyLenMismatch = simple_panic.copyLenMismatch; pub const memcpyAlias = simple_panic.memcpyAlias; pub const noreturnReturned = simple_panic.noreturnReturned; diff --git a/test/cases/compile_errors/bad_panic_generic_signature.zig b/test/cases/compile_errors/bad_panic_generic_signature.zig index 9373551359..8ef4810745 100644 --- a/test/cases/compile_errors/bad_panic_generic_signature.zig +++ b/test/cases/compile_errors/bad_panic_generic_signature.zig @@ -11,8 +11,7 @@ pub const panic = struct { pub const castToNull = simple_panic.castToNull; pub const incorrectAlignment = simple_panic.incorrectAlignment; pub const invalidErrorCode = simple_panic.invalidErrorCode; - pub const castTruncatedData = simple_panic.castTruncatedData; - pub const negativeToUnsigned = simple_panic.negativeToUnsigned; + pub const integerOutOfBounds = simple_panic.integerOutOfBounds; pub const integerOverflow = simple_panic.integerOverflow; pub const shlOverflow = simple_panic.shlOverflow; pub const shrOverflow = simple_panic.shrOverflow; @@ -23,8 +22,6 @@ pub const panic = struct { pub const shiftRhsTooBig = simple_panic.shiftRhsTooBig; pub const invalidEnumValue = simple_panic.invalidEnumValue; pub const forLenMismatch = simple_panic.forLenMismatch; - /// Delete after next zig1.wasm update - pub const memcpyLenMismatch = copyLenMismatch; pub const copyLenMismatch = simple_panic.copyLenMismatch; pub const memcpyAlias = simple_panic.memcpyAlias; pub const noreturnReturned = simple_panic.noreturnReturned; diff --git a/test/cases/compile_errors/bogus_method_call_on_slice.zig b/test/cases/compile_errors/bogus_method_call_on_slice.zig index 5c203b90ad..4e35c4264e 100644 --- a/test/cases/compile_errors/bogus_method_call_on_slice.zig +++ b/test/cases/compile_errors/bogus_method_call_on_slice.zig @@ -16,5 +16,5 @@ pub export fn entry2() void { // // :3:6: error: no field or member function named 'copy' in '[]const u8' // :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})' -// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_496' +// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_500' // :12:6: note: struct declared here diff --git a/test/cases/compile_errors/coerce_anon_struct.zig b/test/cases/compile_errors/coerce_anon_struct.zig index 461e44de3e..9b515762b1 100644 --- a/test/cases/compile_errors/coerce_anon_struct.zig +++ b/test/cases/compile_errors/coerce_anon_struct.zig @@ -6,6 +6,6 @@ export fn foo() void { // error // -// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_485' +// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_489' // :3:16: note: struct declared here // :1:11: note: struct declared here diff --git a/test/cases/compile_errors/redundant_try.zig b/test/cases/compile_errors/redundant_try.zig index c58049e92b..b6d030686b 100644 --- a/test/cases/compile_errors/redundant_try.zig +++ b/test/cases/compile_errors/redundant_try.zig @@ -44,9 +44,9 @@ comptime { // // :5:23: error: expected error union type, found 'comptime_int' // :10:23: error: expected error union type, found '@TypeOf(.{})' -// :15:23: error: expected error union type, found 'tmp.test2__struct_522' +// :15:23: error: expected error union type, found 'tmp.test2__struct_526' // :15:23: note: struct declared here -// :20:27: error: expected error union type, found 'tmp.test3__struct_524' +// :20:27: 
error: expected error union type, found 'tmp.test3__struct_528' // :20:27: note: struct declared here // :25:23: error: expected error union type, found 'struct { comptime *const [5:0]u8 = "hello" }' // :31:13: error: expected error union type, found 'u32' diff --git a/test/cases/compile_errors/shuffle_with_selected_index_past_first_vector_length.zig b/test/cases/compile_errors/shuffle_with_selected_index_past_first_vector_length.zig index c1594d55fb..4ad01d28c4 100644 --- a/test/cases/compile_errors/shuffle_with_selected_index_past_first_vector_length.zig +++ b/test/cases/compile_errors/shuffle_with_selected_index_past_first_vector_length.zig @@ -1,14 +1,20 @@ -export fn entry() void { - const v: @Vector(4, u32) = [4]u32{ 10, 11, 12, 13 }; - const x: @Vector(4, u32) = [4]u32{ 14, 15, 16, 17 }; - const z = @shuffle(u32, v, x, [8]i32{ 0, 1, 2, 3, 7, 6, 5, 4 }); - _ = z; +export fn foo() void { + // Here, the bad index ('7') is not less than 'b.len', so the error shouldn't have a note suggesting a negative index. + const a: @Vector(4, u32) = .{ 10, 11, 12, 13 }; + const b: @Vector(4, u32) = .{ 14, 15, 16, 17 }; + _ = @shuffle(u32, a, b, [8]i32{ 0, 1, 2, 3, 7, 6, 5, 4 }); +} +export fn bar() void { + // Here, the bad index ('7') *is* less than 'b.len', so the error *should* have a note suggesting a negative index. + const a: @Vector(4, u32) = .{ 10, 11, 12, 13 }; + const b: @Vector(9, u32) = .{ 14, 15, 16, 17, 18, 19, 20, 21, 22 }; + _ = @shuffle(u32, a, b, [8]i32{ 0, 1, 2, 3, 7, 6, 5, 4 }); } // error -// backend=stage2 -// target=native // -// :4:41: error: mask index '4' has out-of-bounds selection -// :4:29: note: selected index '7' out of bounds of '@Vector(4, u32)' -// :4:32: note: selections from the second vector are specified with negative numbers +// :5:35: error: mask element at index '4' selects out-of-bounds index +// :5:23: note: index '7' exceeds bounds of '@Vector(4, u32)' given here +// :11:35: error: mask element at index '4' selects out-of-bounds index +// :11:23: note: index '7' exceeds bounds of '@Vector(4, u32)' given here +// :11:26: note: use '~@as(u32, 7)' to index into second vector given here diff --git a/test/cases/safety/@intCast to u0.zig b/test/cases/safety/@intCast to u0.zig index 1637a859ad..4394f63f54 100644 --- a/test/cases/safety/@intCast to u0.zig +++ b/test/cases/safety/@intCast to u0.zig @@ -2,7 +2,7 @@ const std = @import("std"); pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn { _ = stack_trace; - if (std.mem.eql(u8, message, "integer cast truncated bits")) { + if (std.mem.eql(u8, message, "integer does not fit in destination type")) { std.process.exit(0); } std.process.exit(1); diff --git a/test/cases/safety/memmove_len_mismatch.zig b/test/cases/safety/memmove_len_mismatch.zig index 16774cfe86..881af9f336 100644 --- a/test/cases/safety/memmove_len_mismatch.zig +++ b/test/cases/safety/memmove_len_mismatch.zig @@ -15,5 +15,5 @@ pub fn main() !void { return error.TestFailed; } // run -// backend=llvm +// backend=stage2,llvm // target=native diff --git a/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig b/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig index 3ee2f1fefa..fa0eec94c0 100644 --- a/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig +++ b/test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig @@ -2,7 +2,7 @@ const std = @import("std"); pub fn 
panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn { _ = stack_trace; - if (std.mem.eql(u8, message, "attempt to cast negative value to unsigned integer")) { + if (std.mem.eql(u8, message, "integer does not fit in destination type")) { std.process.exit(0); } std.process.exit(1); diff --git a/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig b/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig index 44402c329e..6ce662cdc7 100644 --- a/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig +++ b/test/cases/safety/signed integer not fitting in cast to unsigned integer.zig @@ -2,7 +2,7 @@ const std = @import("std"); pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn { _ = stack_trace; - if (std.mem.eql(u8, message, "attempt to cast negative value to unsigned integer")) { + if (std.mem.eql(u8, message, "integer does not fit in destination type")) { std.process.exit(0); } std.process.exit(1); diff --git a/test/cases/safety/signed-unsigned vector cast.zig b/test/cases/safety/signed-unsigned vector cast.zig index f4da258f28..919562b06c 100644 --- a/test/cases/safety/signed-unsigned vector cast.zig +++ b/test/cases/safety/signed-unsigned vector cast.zig @@ -2,7 +2,7 @@ const std = @import("std"); pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn { _ = stack_trace; - if (std.mem.eql(u8, message, "attempt to cast negative value to unsigned integer")) { + if (std.mem.eql(u8, message, "integer does not fit in destination type")) { std.process.exit(0); } std.process.exit(1); diff --git a/test/cases/safety/slice_cast_change_len_0.zig b/test/cases/safety/slice_cast_change_len_0.zig index 26fee36672..d32bdfc920 100644 --- a/test/cases/safety/slice_cast_change_len_0.zig +++ b/test/cases/safety/slice_cast_change_len_0.zig @@ -23,4 +23,5 @@ pub fn panic(message: []const u8, _: ?*std.builtin.StackTrace, _: ?usize) noretu const std = @import("std"); // run -// backend=llvm +// backend=stage2,llvm +// target=x86_64-linux diff --git a/test/cases/safety/slice_cast_change_len_1.zig b/test/cases/safety/slice_cast_change_len_1.zig index 139b6fe8a2..5d3728bcdc 100644 --- a/test/cases/safety/slice_cast_change_len_1.zig +++ b/test/cases/safety/slice_cast_change_len_1.zig @@ -23,4 +23,5 @@ pub fn panic(message: []const u8, _: ?*std.builtin.StackTrace, _: ?usize) noretu const std = @import("std"); // run -// backend=llvm +// backend=stage2,llvm +// target=x86_64-linux diff --git a/test/cases/safety/slice_cast_change_len_2.zig b/test/cases/safety/slice_cast_change_len_2.zig index 280e136cb8..3a25d27504 100644 --- a/test/cases/safety/slice_cast_change_len_2.zig +++ b/test/cases/safety/slice_cast_change_len_2.zig @@ -23,4 +23,5 @@ pub fn panic(message: []const u8, _: ?*std.builtin.StackTrace, _: ?usize) noretu const std = @import("std"); // run -// backend=llvm +// backend=stage2,llvm +// target=x86_64-linux diff --git a/test/cases/safety/truncating vector cast.zig b/test/cases/safety/truncating vector cast.zig index ae76d4dec1..9b222e6918 100644 --- a/test/cases/safety/truncating vector cast.zig +++ b/test/cases/safety/truncating vector cast.zig @@ -2,7 +2,7 @@ const std = @import("std"); pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn { _ = stack_trace; - if (std.mem.eql(u8, message, "integer cast truncated bits")) { + if (std.mem.eql(u8, message, "integer does not fit in destination 
type")) { std.process.exit(0); } std.process.exit(1); @@ -17,5 +17,5 @@ pub fn main() !void { } // run -// backend=llvm +// backend=stage2,llvm // target=native diff --git a/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig b/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig index 6a3f0c08a6..185cde9973 100644 --- a/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig +++ b/test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig @@ -2,7 +2,7 @@ const std = @import("std"); pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn { _ = stack_trace; - if (std.mem.eql(u8, message, "integer cast truncated bits")) { + if (std.mem.eql(u8, message, "integer does not fit in destination type")) { std.process.exit(0); } std.process.exit(1); diff --git a/test/cases/safety/unsigned-signed vector cast.zig b/test/cases/safety/unsigned-signed vector cast.zig index 32676f7c1c..6501643b36 100644 --- a/test/cases/safety/unsigned-signed vector cast.zig +++ b/test/cases/safety/unsigned-signed vector cast.zig @@ -2,7 +2,7 @@ const std = @import("std"); pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn { _ = stack_trace; - if (std.mem.eql(u8, message, "integer cast truncated bits")) { + if (std.mem.eql(u8, message, "integer does not fit in destination type")) { std.process.exit(0); } std.process.exit(1); @@ -17,5 +17,5 @@ pub fn main() !void { } // run -// backend=llvm +// backend=stage2,llvm // target=native diff --git a/test/cases/safety/value does not fit in shortening cast - u0.zig b/test/cases/safety/value does not fit in shortening cast - u0.zig index 3644437ea1..f29df8d8af 100644 --- a/test/cases/safety/value does not fit in shortening cast - u0.zig +++ b/test/cases/safety/value does not fit in shortening cast - u0.zig @@ -2,7 +2,7 @@ const std = @import("std"); pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn { _ = stack_trace; - if (std.mem.eql(u8, message, "integer cast truncated bits")) { + if (std.mem.eql(u8, message, "integer does not fit in destination type")) { std.process.exit(0); } std.process.exit(1); diff --git a/test/cases/safety/value does not fit in shortening cast.zig b/test/cases/safety/value does not fit in shortening cast.zig index b48c4698fa..415ac95dbb 100644 --- a/test/cases/safety/value does not fit in shortening cast.zig +++ b/test/cases/safety/value does not fit in shortening cast.zig @@ -2,7 +2,7 @@ const std = @import("std"); pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn { _ = stack_trace; - if (std.mem.eql(u8, message, "integer cast truncated bits")) { + if (std.mem.eql(u8, message, "integer does not fit in destination type")) { std.process.exit(0); } std.process.exit(1); diff --git a/test/cases/safety/vector integer addition overflow.zig b/test/cases/safety/vector integer addition overflow.zig index 21c26eeb4e..db08d8b241 100644 --- a/test/cases/safety/vector integer addition overflow.zig +++ b/test/cases/safety/vector integer addition overflow.zig @@ -18,5 +18,5 @@ fn add(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) { return a + b; } // run -// backend=llvm +// backend=stage2,llvm // target=native diff --git a/test/cases/safety/vector integer multiplication overflow.zig b/test/cases/safety/vector integer multiplication overflow.zig 
index 8678eccec6..61176fd482 100644 --- a/test/cases/safety/vector integer multiplication overflow.zig +++ b/test/cases/safety/vector integer multiplication overflow.zig @@ -18,5 +18,5 @@ fn mul(a: @Vector(4, u8), b: @Vector(4, u8)) @Vector(4, u8) { return a * b; } // run -// backend=llvm +// backend=stage2,llvm // target=native diff --git a/test/cases/safety/vector integer negation overflow.zig b/test/cases/safety/vector integer negation overflow.zig index 5f8becad17..f1f36ff294 100644 --- a/test/cases/safety/vector integer negation overflow.zig +++ b/test/cases/safety/vector integer negation overflow.zig @@ -18,5 +18,5 @@ fn neg(a: @Vector(4, i16)) @Vector(4, i16) { return -a; } // run -// backend=llvm +// backend=stage2,llvm // target=native diff --git a/test/cases/safety/vector integer subtraction overflow.zig b/test/cases/safety/vector integer subtraction overflow.zig index 82c0342121..9ba942469c 100644 --- a/test/cases/safety/vector integer subtraction overflow.zig +++ b/test/cases/safety/vector integer subtraction overflow.zig @@ -18,5 +18,5 @@ fn sub(a: @Vector(4, u32), b: @Vector(4, u32)) @Vector(4, u32) { return a - b; } // run -// backend=llvm +// backend=stage2,llvm // target=native diff --git a/test/incremental/change_panic_handler_explicit b/test/incremental/change_panic_handler_explicit index 322773fd47..ad5d3d124a 100644 --- a/test/incremental/change_panic_handler_explicit +++ b/test/incremental/change_panic_handler_explicit @@ -26,8 +26,7 @@ pub const panic = struct { pub const castToNull = no_panic.castToNull; pub const incorrectAlignment = no_panic.incorrectAlignment; pub const invalidErrorCode = no_panic.invalidErrorCode; - pub const castTruncatedData = no_panic.castTruncatedData; - pub const negativeToUnsigned = no_panic.negativeToUnsigned; + pub const integerOutOfBounds = no_panic.integerOutOfBounds; pub const shlOverflow = no_panic.shlOverflow; pub const shrOverflow = no_panic.shrOverflow; pub const divideByZero = no_panic.divideByZero; @@ -37,8 +36,6 @@ pub const panic = struct { pub const shiftRhsTooBig = no_panic.shiftRhsTooBig; pub const invalidEnumValue = no_panic.invalidEnumValue; pub const forLenMismatch = no_panic.forLenMismatch; - /// Delete after next zig1.wasm update - pub const memcpyLenMismatch = copyLenMismatch; pub const copyLenMismatch = no_panic.copyLenMismatch; pub const memcpyAlias = no_panic.memcpyAlias; pub const noreturnReturned = no_panic.noreturnReturned; @@ -75,8 +72,7 @@ pub const panic = struct { pub const castToNull = no_panic.castToNull; pub const incorrectAlignment = no_panic.incorrectAlignment; pub const invalidErrorCode = no_panic.invalidErrorCode; - pub const castTruncatedData = no_panic.castTruncatedData; - pub const negativeToUnsigned = no_panic.negativeToUnsigned; + pub const integerOutOfBounds = no_panic.integerOutOfBounds; pub const shlOverflow = no_panic.shlOverflow; pub const shrOverflow = no_panic.shrOverflow; pub const divideByZero = no_panic.divideByZero; @@ -86,8 +82,6 @@ pub const panic = struct { pub const shiftRhsTooBig = no_panic.shiftRhsTooBig; pub const invalidEnumValue = no_panic.invalidEnumValue; pub const forLenMismatch = no_panic.forLenMismatch; - /// Delete after next zig1.wasm update - pub const memcpyLenMismatch = copyLenMismatch; pub const copyLenMismatch = no_panic.copyLenMismatch; pub const memcpyAlias = no_panic.memcpyAlias; pub const noreturnReturned = no_panic.noreturnReturned; @@ -124,8 +118,7 @@ pub const panic = struct { pub const castToNull = no_panic.castToNull; pub const incorrectAlignment = 
no_panic.incorrectAlignment; pub const invalidErrorCode = no_panic.invalidErrorCode; - pub const castTruncatedData = no_panic.castTruncatedData; - pub const negativeToUnsigned = no_panic.negativeToUnsigned; + pub const integerOutOfBounds = no_panic.integerOutOfBounds; pub const shlOverflow = no_panic.shlOverflow; pub const shrOverflow = no_panic.shrOverflow; pub const divideByZero = no_panic.divideByZero; @@ -135,8 +128,6 @@ pub const panic = struct { pub const shiftRhsTooBig = no_panic.shiftRhsTooBig; pub const invalidEnumValue = no_panic.invalidEnumValue; pub const forLenMismatch = no_panic.forLenMismatch; - /// Delete after next zig1.wasm update - pub const memcpyLenMismatch = copyLenMismatch; pub const copyLenMismatch = no_panic.copyLenMismatch; pub const memcpyAlias = no_panic.memcpyAlias; pub const noreturnReturned = no_panic.noreturnReturned; diff --git a/test/src/Cases.zig b/test/src/Cases.zig index aeb71ce95b..a36f4f3f7c 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -400,7 +400,7 @@ fn addFromDirInner( for (targets) |target_query| { const output = try manifest.trailingLinesSplit(ctx.arena); try ctx.translate.append(.{ - .name = std.fs.path.stem(filename), + .name = try caseNameFromPath(ctx.arena, filename), .c_frontend = c_frontend, .target = b.resolveTargetQuery(target_query), .link_libc = link_libc, @@ -416,7 +416,7 @@ fn addFromDirInner( for (targets) |target_query| { const output = try manifest.trailingSplit(ctx.arena); try ctx.translate.append(.{ - .name = std.fs.path.stem(filename), + .name = try caseNameFromPath(ctx.arena, filename), .c_frontend = c_frontend, .target = b.resolveTargetQuery(target_query), .link_libc = link_libc, @@ -454,7 +454,7 @@ fn addFromDirInner( const next = ctx.cases.items.len; try ctx.cases.append(.{ - .name = std.fs.path.stem(filename), + .name = try caseNameFromPath(ctx.arena, filename), .import_path = std.fs.path.dirname(filename), .backend = backend, .files = .init(ctx.arena), @@ -1138,3 +1138,17 @@ fn knownFileExtension(filename: []const u8) bool { if (it.next() != null) return false; return false; } + +/// `path` is a path relative to the root case directory. +/// e.g. `compile_errors/undeclared_identifier.zig` +/// The case name is computed by removing the extension and replacing path separators with dots. +/// e.g. `compile_errors.undeclared_identifier` +/// Including the directory components makes `-Dtest-filter` more useful, because you can filter +/// based on subdirectory; e.g. `-Dtest-filter=compile_errors` to run the compile error tests. +fn caseNameFromPath(arena: Allocator, path: []const u8) Allocator.Error![]const u8 { + const ext_len = std.fs.path.extension(path).len; + const path_sans_ext = path[0 .. 
path.len - ext_len]; + const result = try arena.dupe(u8, path_sans_ext); + std.mem.replaceScalar(u8, result, std.fs.path.sep, '.'); + return result; +} diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 249c123a11..63bcb63111 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -601,7 +601,8 @@ type_tag_handlers = { 'fn_void_no_args': lambda payload: 'fn() void', 'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.naked) noreturn', 'fn_ccc_void_no_args': lambda payload: 'fn() callconv(.c) void', - 'single_const_pointer_to_comptime_int': lambda payload: '*const comptime_int', + 'ptr_usize': lambda payload: '*usize', + 'ptr_const_comptime_int': lambda payload: '*const comptime_int', 'manyptr_u8': lambda payload: '[*]u8', 'manyptr_const_u8': lambda payload: '[*]const u8', 'manyptr_const_u8_sentinel_0': lambda payload: '[*:0]const u8',
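
A note for reviewers of the test/behavior/x86_64/binary.zig hunks above: the overflow builtins' result type there changes from struct { Type, u1 } to struct { Type, ChangeScalar(Type, u1) }, reflecting that for vector operands @addWithOverflow and friends yield a vector of per-lane overflow bits rather than a single u1. A minimal standalone sketch of that language behavior (not part of the patch; the test name is mine):

const std = @import("std");

test "@addWithOverflow on vectors yields a vector of overflow bits" {
    const lhs: @Vector(2, u8) = .{ 255, 1 };
    const rhs: @Vector(2, u8) = .{ 1, 1 };
    // The tuple's second field is @Vector(2, u1): one overflow bit per lane.
    const result, const overflow = @addWithOverflow(lhs, rhs);
    try std.testing.expectEqual(@Vector(2, u8){ 0, 2 }, result);
    try std.testing.expectEqual(@Vector(2, u1){ 1, 0 }, overflow);
}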
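
The rewritten shuffle_with_selected_index_past_first_vector_length case hinges on @shuffle's mask convention: a non-negative mask element i selects element i of the first operand, while a negative element selects element ~i of the second operand, which is what the new "~@as(u32, 7)" note alludes to. As a minimal standalone sketch of that convention (not part of the patch; the test name is mine):

const std = @import("std");

test "negative @shuffle mask elements select from the second operand" {
    const a: @Vector(4, u32) = .{ 10, 11, 12, 13 };
    const b: @Vector(4, u32) = .{ 14, 15, 16, 17 };
    // Mask element i >= 0 picks a[i]; a negative element picks b[~i],
    // so ~@as(i32, 0) == -1 selects b[0].
    const r = @shuffle(u32, a, b, [8]i32{ 0, 1, 2, 3, ~@as(i32, 0), ~@as(i32, 1), ~@as(i32, 2), ~@as(i32, 3) });
    const expected: @Vector(8, u32) = .{ 10, 11, 12, 13, 14, 15, 16, 17 };
    try std.testing.expectEqual(expected, r);
}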
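
Finally, to illustrate the Cases.zig change: a standalone sketch of the new case-name derivation (not part of the patch; caseName and the test are mine, mirroring the patch's caseNameFromPath). It assumes a POSIX '/' path separator for the expected string:

const std = @import("std");

// Mirrors caseNameFromPath: strip the extension, then turn path separators
// into dots so `-Dtest-filter` can match on subdirectory names.
fn caseName(arena: std.mem.Allocator, path: []const u8) ![]const u8 {
    const ext_len = std.fs.path.extension(path).len;
    const result = try arena.dupe(u8, path[0 .. path.len - ext_len]);
    std.mem.replaceScalar(u8, result, std.fs.path.sep, '.');
    return result;
}

test caseName {
    var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena_state.deinit();
    // Assumes std.fs.path.sep == '/' (POSIX hosts).
    try std.testing.expectEqualStrings(
        "compile_errors.undeclared_identifier",
        try caseName(arena_state.allocator(), "compile_errors/undeclared_identifier.zig"),
    );
}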