Merge pull request #24011 from jacobly0/legalize-unary

Legalize: implement scalarization and safety check expansion
Jacob Young 2025-06-01 22:02:34 -04:00 committed by GitHub
commit 8dbd29cc45
92 changed files with 4028 additions and 1689 deletions

View File

@ -437,8 +437,8 @@ pub fn build(b: *std.Build) !void {
.skip_non_native = skip_non_native,
.skip_libc = skip_libc,
.use_llvm = use_llvm,
// 2262585344 was observed on an x86_64-linux-gnu host.
.max_rss = 2488843878,
// 2520100864 was observed on an x86_64-linux-gnu host.
.max_rss = 2772110950,
}));
test_modules_step.dependOn(tests.addModuleTests(b, .{

View File

@ -5,4 +5,4 @@ test "integer cast panic" {
_ = b;
}
// test_error=cast truncated bits
// test_error=integer does not fit in destination type
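The two messages this replaces ("cast truncated bits" and "attempt to cast negative value to unsigned integer") now share one wording. A hypothetical standalone sketch, not part of this commit, of a cast that trips the unified check:

```zig
test "intCast that cannot fit" {
    var x: i32 = 300;
    _ = &x; // keep x runtime-known so the safety check is emitted
    // Safety panic: "integer does not fit in destination type"
    const y: u8 = @intCast(x);
    _ = y;
}
```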

View File

@ -1246,12 +1246,8 @@ pub const Cpu = struct {
/// Adds the specified feature set but not its dependencies.
pub fn addFeatureSet(set: *Set, other_set: Set) void {
if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff) {
for (&set.ints, other_set.ints) |*set_int, other_set_int| set_int.* |= other_set_int;
} else {
set.ints = @as(@Vector(usize_count, usize), set.ints) | @as(@Vector(usize_count, usize), other_set.ints);
}
}
/// Removes the specified feature but not its dependents.
pub fn removeFeature(set: *Set, arch_feature_index: Index) void {
@ -1262,12 +1258,8 @@ pub const Cpu = struct {
/// Removes the specified feature but not its dependents.
pub fn removeFeatureSet(set: *Set, other_set: Set) void {
if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff) {
for (&set.ints, other_set.ints) |*set_int, other_set_int| set_int.* &= ~other_set_int;
} else {
set.ints = @as(@Vector(usize_count, usize), set.ints) & ~@as(@Vector(usize_count, usize), other_set.ints);
}
}
pub fn populateDependencies(set: *Set, all_features_list: []const Cpu.Feature) void {
@setEvalBranchQuota(1000000);
@ -1295,18 +1287,11 @@ pub const Cpu = struct {
}
pub fn isSuperSetOf(set: Set, other_set: Set) bool {
if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff) {
var result = true;
for (&set.ints, other_set.ints) |*set_int, other_set_int|
result = result and (set_int.* & other_set_int) == other_set_int;
return result;
} else {
const V = @Vector(usize_count, usize);
const set_v: V = set.ints;
const other_v: V = other_set.ints;
return @reduce(.And, (set_v & other_v) == other_v);
}
}
};
pub fn FeatureSetFns(comptime F: type) type {
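These set operations lean on `@Vector` for word-parallel bit math; `isSuperSetOf` folds an element-wise comparison down to one bool with `@reduce(.And, ...)`. A minimal standalone sketch of the same idea, assuming a fixed four-word set in place of `usize_count`:

```zig
const std = @import("std");

fn isSuperSetOf(set: [4]usize, other: [4]usize) bool {
    const V = @Vector(4, usize);
    const set_v: V = set;
    const other_v: V = other;
    // A superset keeps every bit of `other` when the two are intersected.
    return @reduce(.And, (set_v & other_v) == other_v);
}

test isSuperSetOf {
    const a: [4]usize = .{ 0b1111, 0, 0, 0 };
    const b: [4]usize = .{ 0b0101, 0, 0, 0 };
    try std.testing.expect(isSuperSetOf(a, b));
    try std.testing.expect(!isSuperSetOf(b, a));
}
```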

View File

@ -889,19 +889,10 @@ pub fn ArrayHashMapUnmanaged(
self.pointer_stability.lock();
defer self.pointer_stability.unlock();
if (new_capacity <= linear_scan_max) {
try self.entries.ensureTotalCapacity(gpa, new_capacity);
return;
}
if (new_capacity <= linear_scan_max) return;
if (self.index_header) |header| if (new_capacity <= header.capacity()) return;
if (self.index_header) |header| {
if (new_capacity <= header.capacity()) {
try self.entries.ensureTotalCapacity(gpa, new_capacity);
return;
}
}
try self.entries.ensureTotalCapacity(gpa, new_capacity);
const new_bit_index = try IndexHeader.findBitIndex(new_capacity);
const new_header = try IndexHeader.alloc(gpa, new_bit_index);
@ -2116,7 +2107,7 @@ const IndexHeader = struct {
fn findBitIndex(desired_capacity: usize) Allocator.Error!u8 {
if (desired_capacity > max_capacity) return error.OutOfMemory;
var new_bit_index = @as(u8, @intCast(std.math.log2_int_ceil(usize, desired_capacity)));
var new_bit_index: u8 = @intCast(std.math.log2_int_ceil(usize, desired_capacity));
if (desired_capacity > index_capacities[new_bit_index]) new_bit_index += 1;
if (new_bit_index < min_bit_index) new_bit_index = min_bit_index;
assert(desired_capacity <= index_capacities[new_bit_index]);
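The rewritten cast keeps the same math: the bit index is the ceiling log2 of the requested capacity, bumped by one if the precomputed capacity table still falls short. A quick check of the rounding, using plain `std.math` independent of this file:

```zig
const std = @import("std");

test "log2_int_ceil rounds capacity up to a power of two" {
    try std.testing.expectEqual(3, std.math.log2_int_ceil(usize, 5)); // 2^3 = 8 >= 5
    try std.testing.expectEqual(3, std.math.log2_int_ceil(usize, 8)); // exact power of two
    try std.testing.expectEqual(4, std.math.log2_int_ceil(usize, 9)); // 2^4 = 16 >= 9
}
```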

View File

@ -499,15 +499,12 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
fn ChaChaImpl(comptime rounds_nb: usize) type {
switch (builtin.cpu.arch) {
.x86_64 => {
const has_avx2 = std.Target.x86.featureSetHas(builtin.cpu.features, .avx2);
const has_avx512f = std.Target.x86.featureSetHas(builtin.cpu.features, .avx512f);
if (builtin.zig_backend != .stage2_x86_64 and has_avx512f) return ChaChaVecImpl(rounds_nb, 4);
if (has_avx2) return ChaChaVecImpl(rounds_nb, 2);
if (builtin.zig_backend != .stage2_x86_64 and std.Target.x86.featureSetHas(builtin.cpu.features, .avx512f)) return ChaChaVecImpl(rounds_nb, 4);
if (std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return ChaChaVecImpl(rounds_nb, 2);
return ChaChaVecImpl(rounds_nb, 1);
},
.aarch64 => {
const has_neon = std.Target.aarch64.featureSetHas(builtin.cpu.features, .neon);
if (has_neon) return ChaChaVecImpl(rounds_nb, 4);
if (builtin.zig_backend != .stage2_aarch64 and std.Target.aarch64.featureSetHas(builtin.cpu.features, .neon)) return ChaChaVecImpl(rounds_nb, 4);
return ChaChaNonVecImpl(rounds_nb);
},
else => return ChaChaNonVecImpl(rounds_nb),
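The selection happens entirely at comptime: `builtin.cpu` describes the build target, so the feature queries fold to constants and only one implementation is compiled in. A minimal sketch of the dispatch pattern (the widths here are illustrative, not the ChaCha tuning):

```zig
const std = @import("std");
const builtin = @import("builtin");

// Evaluated at comptime; callers use the result in type-level positions.
fn vectorDegree() comptime_int {
    return switch (builtin.cpu.arch) {
        .x86_64 => if (std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) 2 else 1,
        else => 1,
    };
}
```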

View File

@ -78,13 +78,9 @@ pub fn FullPanic(comptime panicFn: fn ([]const u8, ?usize) noreturn) type {
@branchHint(.cold);
call("invalid error code", @returnAddress());
}
pub fn castTruncatedData() noreturn {
pub fn integerOutOfBounds() noreturn {
@branchHint(.cold);
call("integer cast truncated bits", @returnAddress());
}
pub fn negativeToUnsigned() noreturn {
@branchHint(.cold);
call("attempt to cast negative value to unsigned integer", @returnAddress());
call("integer does not fit in destination type", @returnAddress());
}
pub fn integerOverflow() noreturn {
@branchHint(.cold);
@ -126,8 +122,6 @@ pub fn FullPanic(comptime panicFn: fn ([]const u8, ?usize) noreturn) type {
@branchHint(.cold);
call("for loop over objects with non-equal lengths", @returnAddress());
}
/// Delete after next zig1.wasm update
pub const memcpyLenMismatch = copyLenMismatch;
pub fn copyLenMismatch() noreturn {
@branchHint(.cold);
call("source and destination arguments have non-equal lengths", @returnAddress());

View File

@ -65,12 +65,7 @@ pub fn invalidErrorCode() noreturn {
@trap();
}
pub fn castTruncatedData() noreturn {
@branchHint(.cold);
@trap();
}
pub fn negativeToUnsigned() noreturn {
pub fn integerOutOfBounds() noreturn {
@branchHint(.cold);
@trap();
}
@ -125,9 +120,6 @@ pub fn forLenMismatch() noreturn {
@trap();
}
/// Delete after next zig1.wasm update
pub const memcpyLenMismatch = copyLenMismatch;
pub fn copyLenMismatch() noreturn {
@branchHint(.cold);
@trap();

View File

@ -72,12 +72,8 @@ pub fn invalidErrorCode() noreturn {
call("invalid error code", null);
}
pub fn castTruncatedData() noreturn {
call("integer cast truncated bits", null);
}
pub fn negativeToUnsigned() noreturn {
call("attempt to cast negative value to unsigned integer", null);
pub fn integerOutOfBounds() noreturn {
call("integer does not fit in destination type", null);
}
pub fn integerOverflow() noreturn {
@ -120,9 +116,6 @@ pub fn forLenMismatch() noreturn {
call("for loop over objects with non-equal lengths", null);
}
/// Delete after next zig1.wasm update
pub const memcpyLenMismatch = copyLenMismatch;
pub fn copyLenMismatch() noreturn {
call("source and destination have non-equal lengths", null);
}

View File

@ -780,7 +780,6 @@ fn testExpect(comptime H: type, seed: anytype, input: []const u8, expected: u64)
}
test "xxhash3" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23807
const H = XxHash3;
@ -814,7 +813,6 @@ test "xxhash3" {
}
test "xxhash3 smhasher" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23807
const Test = struct {
@ -828,7 +826,6 @@ test "xxhash3 smhasher" {
}
test "xxhash3 iterative api" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23807
const Test = struct {

View File

@ -231,8 +231,6 @@ pub fn extract(
}
test "vector patterns" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const base = @Vector(4, u32){ 10, 20, 30, 40 };
const other_base = @Vector(4, u32){ 55, 66, 77, 88 };
@ -302,8 +300,6 @@ pub fn reverseOrder(vec: anytype) @TypeOf(vec) {
}
test "vector shifting" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const base = @Vector(4, u32){ 10, 20, 30, 40 };
try std.testing.expectEqual([4]u32{ 30, 40, 999, 999 }, shiftElementsLeft(base, 2, 999));
@ -368,9 +364,6 @@ pub fn countElementsWithValue(vec: anytype, value: std.meta.Child(@TypeOf(vec)))
}
test "vector searching" {
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .ssse3)) return error.SkipZigTest;
const base = @Vector(8, u32){ 6, 4, 7, 4, 4, 2, 3, 7 };
try std.testing.expectEqual(@as(?u3, 1), firstIndexOfValue(base, 4));
@ -462,7 +455,6 @@ pub fn prefixScan(comptime op: std.builtin.ReduceOp, comptime hop: isize, vec: a
}
test "vector prefix scan" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if ((builtin.cpu.arch == .armeb or builtin.cpu.arch == .thumbeb) and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/22060
if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21893
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;

View File

@ -11194,6 +11194,7 @@ fn rvalueInner(
const as_void = @as(u64, @intFromEnum(Zir.Inst.Ref.void_type)) << 32;
const as_comptime_int = @as(u64, @intFromEnum(Zir.Inst.Ref.comptime_int_type)) << 32;
const as_usize = @as(u64, @intFromEnum(Zir.Inst.Ref.usize_type)) << 32;
const as_u1 = @as(u64, @intFromEnum(Zir.Inst.Ref.u1_type)) << 32;
const as_u8 = @as(u64, @intFromEnum(Zir.Inst.Ref.u8_type)) << 32;
switch ((@as(u64, @intFromEnum(ty_inst)) << 32) | @as(u64, @intFromEnum(result))) {
as_ty | @intFromEnum(Zir.Inst.Ref.u1_type),
@ -11237,10 +11238,11 @@ fn rvalueInner(
as_ty | @intFromEnum(Zir.Inst.Ref.null_type),
as_ty | @intFromEnum(Zir.Inst.Ref.undefined_type),
as_ty | @intFromEnum(Zir.Inst.Ref.enum_literal_type),
as_ty | @intFromEnum(Zir.Inst.Ref.ptr_usize_type),
as_ty | @intFromEnum(Zir.Inst.Ref.ptr_const_comptime_int_type),
as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_u8_type),
as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_type),
as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type),
as_ty | @intFromEnum(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_type),
as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_sentinel_0_type),
as_ty | @intFromEnum(Zir.Inst.Ref.anyerror_void_error_union_type),
@ -11249,27 +11251,45 @@ fn rvalueInner(
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero),
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one),
as_comptime_int | @intFromEnum(Zir.Inst.Ref.negative_one),
as_usize | @intFromEnum(Zir.Inst.Ref.undef_usize),
as_usize | @intFromEnum(Zir.Inst.Ref.zero_usize),
as_usize | @intFromEnum(Zir.Inst.Ref.one_usize),
as_u1 | @intFromEnum(Zir.Inst.Ref.undef_u1),
as_u1 | @intFromEnum(Zir.Inst.Ref.zero_u1),
as_u1 | @intFromEnum(Zir.Inst.Ref.one_u1),
as_u8 | @intFromEnum(Zir.Inst.Ref.zero_u8),
as_u8 | @intFromEnum(Zir.Inst.Ref.one_u8),
as_u8 | @intFromEnum(Zir.Inst.Ref.four_u8),
as_bool | @intFromEnum(Zir.Inst.Ref.undef_bool),
as_bool | @intFromEnum(Zir.Inst.Ref.bool_true),
as_bool | @intFromEnum(Zir.Inst.Ref.bool_false),
as_void | @intFromEnum(Zir.Inst.Ref.void_value),
=> return result, // type of result is already correct
as_bool | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_bool,
as_usize | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_usize,
as_usize | @intFromEnum(Zir.Inst.Ref.undef_u1) => return .undef_usize,
as_u1 | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_u1,
as_usize | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_usize,
as_u1 | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_u8,
as_usize | @intFromEnum(Zir.Inst.Ref.one) => return .one_usize,
as_u1 | @intFromEnum(Zir.Inst.Ref.one) => return .one_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.one) => return .one_u8,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero,
as_u1 | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero_u8,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one,
as_u1 | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one_u8,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_u1) => return .zero,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_u8) => return .zero,
as_usize | @intFromEnum(Zir.Inst.Ref.zero_u1) => return .zero_usize,
as_usize | @intFromEnum(Zir.Inst.Ref.zero_u8) => return .zero_usize,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_u1) => return .one,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_u8) => return .one,
as_usize | @intFromEnum(Zir.Inst.Ref.one_u1) => return .one_usize,
as_usize | @intFromEnum(Zir.Inst.Ref.one_u8) => return .one_usize,
// Need an explicit type coercion instruction.
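The switch above works because every `Zir.Inst.Ref` tag fits in 32 bits, so a (type, result) pair packs into a single `u64` and the whole coercion table becomes one switch. The packing trick in isolation:

```zig
const std = @import("std");

fn pack(ty_tag: u32, result_tag: u32) u64 {
    // High word holds the type tag, low word the result tag.
    return (@as(u64, ty_tag) << 32) | @as(u64, result_tag);
}

test pack {
    try std.testing.expectEqual(0x0000_0002_0000_0003, pack(2, 3));
}
```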

View File

@ -2142,7 +2142,7 @@ pub const Inst = struct {
ref_start_index = static_len,
_,
pub const static_len = 118;
pub const static_len = 124;
pub fn toRef(i: Index) Inst.Ref {
return @enumFromInt(@intFromEnum(Index.ref_start_index) + @intFromEnum(i));
@ -2220,10 +2220,11 @@ pub const Inst = struct {
null_type,
undefined_type,
enum_literal_type,
ptr_usize_type,
ptr_const_comptime_int_type,
manyptr_u8_type,
manyptr_const_u8_type,
manyptr_const_u8_sentinel_0_type,
single_const_pointer_to_comptime_int_type,
slice_const_u8_type,
slice_const_u8_sentinel_0_type,
vector_8_i8_type,
@ -2279,11 +2280,16 @@ pub const Inst = struct {
generic_poison_type,
empty_tuple_type,
undef,
undef_bool,
undef_usize,
undef_u1,
zero,
zero_usize,
zero_u1,
zero_u8,
one,
one_usize,
one_u1,
one_u8,
four_u8,
negative_one,

View File

@ -50,8 +50,6 @@ pub const Inst = struct {
/// is the same as both operands.
/// The panic handler function must be populated before lowering AIR
/// that contains this instruction.
/// This instruction will only be emitted if the backend has the
/// feature `safety_checked_instructions`.
/// Uses the `bin_op` field.
add_safe,
/// Float addition. The instruction is allowed to have equal or more
@ -79,8 +77,6 @@ pub const Inst = struct {
/// is the same as both operands.
/// The panic handler function must be populated before lowering AIR
/// that contains this instruction.
/// This instruction will only be emitted if the backend has the
/// feature `safety_checked_instructions`.
/// Uses the `bin_op` field.
sub_safe,
/// Float subtraction. The instruction is allowed to have equal or more
@ -108,8 +104,6 @@ pub const Inst = struct {
/// is the same as both operands.
/// The panic handler function must be populated before lowering AIR
/// that contains this instruction.
/// This instruction will only be emitted if the backend has the
/// feature `safety_checked_instructions`.
/// Uses the `bin_op` field.
mul_safe,
/// Float multiplication. The instruction is allowed to have equal or more
@ -705,9 +699,21 @@ pub const Inst = struct {
/// equal to the scalar value.
/// Uses the `ty_op` field.
splat,
/// Constructs a vector by selecting elements from `a` and `b` based on `mask`.
/// Uses the `ty_pl` field with payload `Shuffle`.
shuffle,
/// Constructs a vector by selecting elements from a single vector based on a mask. Each
/// mask element is either an index into the vector, or a comptime-known value, or "undef".
/// Uses the `ty_pl` field, where the payload index points to:
/// 1. mask_elem: ShuffleOneMask // for each `mask_len`, which comes from `ty_pl.ty`
/// 2. operand: Ref // guaranteed not to be an interned value
/// See `unwrapShuffleOne`.
shuffle_one,
/// Constructs a vector by selecting elements from two vectors based on a mask. Each mask
/// element is either an index into one of the vectors, or "undef".
/// Uses the `ty_pl` field, where the payload index points to:
/// 1. mask_elem: ShuffleOneMask // for each `mask_len`, which comes from `ty_pl.ty`
/// 2. operand_a: Ref // guaranteed not to be an interned value
/// 3. operand_b: Ref // guaranteed not to be an interned value
/// See `unwrapShuffleTwo`.
shuffle_two,
/// Constructs a vector element-wise from `a` or `b` based on `pred`.
/// Uses the `pl_op` field with `pred` as operand, and payload `Bin`.
select,
@ -1011,10 +1017,11 @@ pub const Inst = struct {
null_type = @intFromEnum(InternPool.Index.null_type),
undefined_type = @intFromEnum(InternPool.Index.undefined_type),
enum_literal_type = @intFromEnum(InternPool.Index.enum_literal_type),
ptr_usize_type = @intFromEnum(InternPool.Index.ptr_usize_type),
ptr_const_comptime_int_type = @intFromEnum(InternPool.Index.ptr_const_comptime_int_type),
manyptr_u8_type = @intFromEnum(InternPool.Index.manyptr_u8_type),
manyptr_const_u8_type = @intFromEnum(InternPool.Index.manyptr_const_u8_type),
manyptr_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.manyptr_const_u8_sentinel_0_type),
single_const_pointer_to_comptime_int_type = @intFromEnum(InternPool.Index.single_const_pointer_to_comptime_int_type),
slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
vector_8_i8_type = @intFromEnum(InternPool.Index.vector_8_i8_type),
@ -1070,11 +1077,16 @@ pub const Inst = struct {
generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type),
empty_tuple_type = @intFromEnum(InternPool.Index.empty_tuple_type),
undef = @intFromEnum(InternPool.Index.undef),
undef_bool = @intFromEnum(InternPool.Index.undef_bool),
undef_usize = @intFromEnum(InternPool.Index.undef_usize),
undef_u1 = @intFromEnum(InternPool.Index.undef_u1),
zero = @intFromEnum(InternPool.Index.zero),
zero_usize = @intFromEnum(InternPool.Index.zero_usize),
zero_u1 = @intFromEnum(InternPool.Index.zero_u1),
zero_u8 = @intFromEnum(InternPool.Index.zero_u8),
one = @intFromEnum(InternPool.Index.one),
one_usize = @intFromEnum(InternPool.Index.one_usize),
one_u1 = @intFromEnum(InternPool.Index.one_u1),
one_u8 = @intFromEnum(InternPool.Index.one_u8),
four_u8 = @intFromEnum(InternPool.Index.four_u8),
negative_one = @intFromEnum(InternPool.Index.negative_one),
@ -1121,7 +1133,7 @@ pub const Inst = struct {
}
pub fn toType(ref: Ref) Type {
return Type.fromInterned(ref.toInterned().?);
return .fromInterned(ref.toInterned().?);
}
};
@ -1241,10 +1253,10 @@ pub const CondBr = struct {
else_body_len: u32,
branch_hints: BranchHints,
pub const BranchHints = packed struct(u32) {
true: std.builtin.BranchHint,
false: std.builtin.BranchHint,
then_cov: CoveragePoint,
else_cov: CoveragePoint,
true: std.builtin.BranchHint = .none,
false: std.builtin.BranchHint = .none,
then_cov: CoveragePoint = .none,
else_cov: CoveragePoint = .none,
_: u24 = 0,
};
};
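With the `= .none` defaults added above, construction sites only need to spell the hints they care about. A standalone sketch with illustrative names (the real fields are `true`/`false`/`then_cov`/`else_cov`):

```zig
const std = @import("std");

const Hint = enum(u8) { none, likely, unlikely };

const Hints = packed struct(u32) {
    then_hint: Hint = .none,
    else_hint: Hint = .none,
    _: u16 = 0,
};

test "omitted fields fall back to .none" {
    const h: Hints = .{ .then_hint = .likely };
    try std.testing.expectEqual(Hint.none, h.else_hint);
}
```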
@ -1299,13 +1311,6 @@ pub const FieldParentPtr = struct {
field_index: u32,
};
pub const Shuffle = struct {
a: Inst.Ref,
b: Inst.Ref,
mask: InternPool.Index,
mask_len: u32,
};
pub const VectorCmp = struct {
lhs: Inst.Ref,
rhs: Inst.Ref,
@ -1320,6 +1325,64 @@ pub const VectorCmp = struct {
}
};
/// Used by `Inst.Tag.shuffle_one`. Represents a mask element which either indexes into a
/// runtime-known vector, or is a comptime-known value.
pub const ShuffleOneMask = packed struct(u32) {
index: u31,
kind: enum(u1) { elem, value },
pub fn elem(idx: u32) ShuffleOneMask {
return .{ .index = @intCast(idx), .kind = .elem };
}
pub fn value(val: Value) ShuffleOneMask {
return .{ .index = @intCast(@intFromEnum(val.toIntern())), .kind = .value };
}
pub const Unwrapped = union(enum) {
/// The resulting element is this index into the runtime vector.
elem: u32,
/// The resulting element is this comptime-known value.
/// It is correctly typed. It might be `undefined`.
value: InternPool.Index,
};
pub fn unwrap(raw: ShuffleOneMask) Unwrapped {
return switch (raw.kind) {
.elem => .{ .elem = raw.index },
.value => .{ .value = @enumFromInt(raw.index) },
};
}
};
/// Used by `Inst.Tag.shuffle_two`. Represents a mask element which either indexes into one
/// of two runtime-known vectors, or is undefined.
pub const ShuffleTwoMask = enum(u32) {
undef = std.math.maxInt(u32),
_,
pub fn aElem(idx: u32) ShuffleTwoMask {
return @enumFromInt(idx << 1);
}
pub fn bElem(idx: u32) ShuffleTwoMask {
return @enumFromInt(idx << 1 | 1);
}
pub const Unwrapped = union(enum) {
/// The resulting element is this index into the first runtime vector.
a_elem: u32,
/// The resulting element is this index into the second runtime vector.
b_elem: u32,
/// The resulting element is `undefined`.
undef,
};
pub fn unwrap(raw: ShuffleTwoMask) Unwrapped {
switch (raw) {
.undef => return .undef,
_ => {},
}
const x = @intFromEnum(raw);
return switch (@as(u1, @truncate(x))) {
0 => .{ .a_elem = x >> 1 },
1 => .{ .b_elem = x >> 1 },
};
}
};
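The `shuffle_two` encoding spends the low bit on the operand choice: index `i` of the first vector is `i << 1`, index `i` of the second is `i << 1 | 1`, and `maxInt(u32)` is reserved for `undef`. A sketch of the round trip, written against the declarations above (assumes this file's top-level `std` import):

```zig
test "ShuffleTwoMask round-trips" {
    switch (ShuffleTwoMask.bElem(5).unwrap()) {
        .b_elem => |i| try std.testing.expectEqual(5, i),
        else => return error.TestUnexpectedResult,
    }
    try std.testing.expect(ShuffleTwoMask.undef.unwrap() == .undef);
}
```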
/// Trailing:
/// 0. `Inst.Ref` for every outputs_len
/// 1. `Inst.Ref` for every inputs_len
@ -1393,7 +1456,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index {
pub fn typeOf(air: *const Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
if (inst.toInterned()) |ip_index| {
return Type.fromInterned(ip.typeOf(ip_index));
return .fromInterned(ip.typeOf(ip_index));
} else {
return air.typeOfIndex(inst.toIndex().?, ip);
}
@ -1483,7 +1546,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.is_non_err_ptr,
.is_named_enum_value,
.error_set_has_value,
=> return Type.bool,
=> return .bool,
.alloc,
.ret_ptr,
@ -1503,7 +1566,6 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.cmpxchg_weak,
.cmpxchg_strong,
.slice,
.shuffle,
.aggregate_init,
.union_init,
.field_parent_ptr,
@ -1517,6 +1579,8 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.ptr_sub,
.try_ptr,
.try_ptr_cold,
.shuffle_one,
.shuffle_two,
=> return datas[@intFromEnum(inst)].ty_pl.ty.toType(),
.not,
@ -1574,7 +1638,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.ret_load,
.unreach,
.trap,
=> return Type.noreturn,
=> return .noreturn,
.breakpoint,
.dbg_stmt,
@ -1597,22 +1661,22 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.set_err_return_trace,
.vector_store_elem,
.c_va_end,
=> return Type.void,
=> return .void,
.slice_len,
.ret_addr,
.frame_addr,
.save_err_return_trace_index,
=> return Type.usize,
=> return .usize,
.wasm_memory_grow => return Type.isize,
.wasm_memory_size => return Type.usize,
.wasm_memory_grow => return .isize,
.wasm_memory_size => return .usize,
.tag_name, .error_name => return Type.slice_const_u8_sentinel_0,
.tag_name, .error_name => return .slice_const_u8_sentinel_0,
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip);
return Type.fromInterned(ip.funcTypeReturnType(callee_ty.toIntern()));
return .fromInterned(ip.funcTypeReturnType(callee_ty.toIntern()));
},
.slice_elem_val, .ptr_elem_val, .array_elem_val => {
@ -1630,7 +1694,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.reduce, .reduce_optimized => {
const operand_ty = air.typeOf(datas[@intFromEnum(inst)].reduce.operand, ip);
return Type.fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child);
return .fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child);
},
.mul_add => return air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip),
@ -1641,7 +1705,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.@"try", .try_cold => {
const err_union_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip);
return Type.fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type);
return .fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type);
},
.tlv_dllimport_ptr => return .fromInterned(datas[@intFromEnum(inst)].ty_nav.ty),
@ -1649,7 +1713,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.work_item_id,
.work_group_size,
.work_group_id,
=> return Type.u32,
=> return .u32,
.inferred_alloc => unreachable,
.inferred_alloc_comptime => unreachable,
@ -1696,7 +1760,7 @@ pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref {
/// Returns `null` if runtime-known.
pub fn value(air: Air, inst: Inst.Ref, pt: Zcu.PerThread) !?Value {
if (inst.toInterned()) |ip_index| {
return Value.fromInterned(ip_index);
return .fromInterned(ip_index);
}
const index = inst.toIndex().?;
return air.typeOfIndex(index, &pt.zcu.intern_pool).onePossibleValue(pt);
@ -1903,7 +1967,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
.reduce,
.reduce_optimized,
.splat,
.shuffle,
.shuffle_one,
.shuffle_two,
.select,
.is_named_enum_value,
.tag_name,
@ -2030,6 +2095,48 @@ pub fn unwrapSwitch(air: *const Air, switch_inst: Inst.Index) UnwrappedSwitch {
};
}
pub fn unwrapShuffleOne(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) struct {
result_ty: Type,
operand: Inst.Ref,
mask: []const ShuffleOneMask,
} {
const inst = air.instructions.get(@intFromEnum(inst_index));
switch (inst.tag) {
.shuffle_one => {},
else => unreachable, // assertion failure
}
const result_ty: Type = .fromInterned(inst.data.ty_pl.ty.toInterned().?);
const mask_len: u32 = result_ty.vectorLen(zcu);
const extra_idx = inst.data.ty_pl.payload;
return .{
.result_ty = result_ty,
.operand = @enumFromInt(air.extra.items[extra_idx + mask_len]),
.mask = @ptrCast(air.extra.items[extra_idx..][0..mask_len]),
};
}
pub fn unwrapShuffleTwo(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) struct {
result_ty: Type,
operand_a: Inst.Ref,
operand_b: Inst.Ref,
mask: []const ShuffleTwoMask,
} {
const inst = air.instructions.get(@intFromEnum(inst_index));
switch (inst.tag) {
.shuffle_two => {},
else => unreachable, // assertion failure
}
const result_ty: Type = .fromInterned(inst.data.ty_pl.ty.toInterned().?);
const mask_len: u32 = result_ty.vectorLen(zcu);
const extra_idx = inst.data.ty_pl.payload;
return .{
.result_ty = result_ty,
.operand_a = @enumFromInt(air.extra.items[extra_idx + mask_len + 0]),
.operand_b = @enumFromInt(air.extra.items[extra_idx + mask_len + 1]),
.mask = @ptrCast(air.extra.items[extra_idx..][0..mask_len]),
};
}
pub const typesFullyResolved = types_resolved.typesFullyResolved;
pub const typeFullyResolved = types_resolved.checkType;
pub const valFullyResolved = types_resolved.checkVal;
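A recurring mechanical change in this file: explicit `Type.fromInterned(...)`, `Type.bool`, `Value.fromInterned(...)` and similar give way to decl literals, which resolve `.name` against the already-known result type. The mechanism in miniature (illustrative type):

```zig
const std = @import("std");

const Thing = struct {
    n: u32,

    pub fn fromInt(n: u32) Thing {
        return .{ .n = n };
    }
};

fn make() Thing {
    // Decl literal: the return type supplies `Thing`, so this is
    // equivalent to `Thing.fromInt(1)`.
    return .fromInt(1);
}

test make {
    try std.testing.expectEqual(1, make().n);
}
```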

File diff suppressed because it is too large.

View File

@ -15,6 +15,7 @@ const Liveness = @This();
const trace = @import("../tracy.zig").trace;
const Air = @import("../Air.zig");
const InternPool = @import("../InternPool.zig");
const Zcu = @import("../Zcu.zig");
pub const Verify = @import("Liveness/Verify.zig");
@ -136,12 +137,15 @@ fn LivenessPassData(comptime pass: LivenessPass) type {
};
}
pub fn analyze(gpa: Allocator, air: Air, intern_pool: *InternPool) Allocator.Error!Liveness {
pub fn analyze(zcu: *Zcu, air: Air, intern_pool: *InternPool) Allocator.Error!Liveness {
const tracy = trace(@src());
defer tracy.end();
const gpa = zcu.gpa;
var a: Analysis = .{
.gpa = gpa,
.zcu = zcu,
.air = air,
.tomb_bits = try gpa.alloc(
usize,
@ -220,6 +224,7 @@ const OperandCategory = enum {
pub fn categorizeOperand(
l: Liveness,
air: Air,
zcu: *Zcu,
inst: Air.Inst.Index,
operand: Air.Inst.Index,
ip: *const InternPool,
@ -511,10 +516,15 @@ pub fn categorizeOperand(
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none);
return .none;
},
.shuffle => {
const extra = air.extraData(Air.Shuffle, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
if (extra.a == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
if (extra.b == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
.shuffle_one => {
const unwrapped = air.unwrapShuffleOne(zcu, inst);
if (unwrapped.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.shuffle_two => {
const unwrapped = air.unwrapShuffleTwo(zcu, inst);
if (unwrapped.operand_a == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
if (unwrapped.operand_b == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
return .none;
},
.reduce, .reduce_optimized => {
@ -639,7 +649,7 @@ pub fn categorizeOperand(
var operand_live: bool = true;
for (&[_]Air.Inst.Index{ then_body[0], else_body[0] }) |cond_inst| {
if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb)
if (l.categorizeOperand(air, zcu, cond_inst, operand, ip) == .tomb)
operand_live = false;
switch (air_tags[@intFromEnum(cond_inst)]) {
@ -824,6 +834,7 @@ pub const BigTomb = struct {
/// In-progress data; on successful analysis converted into `Liveness`.
const Analysis = struct {
gpa: Allocator,
zcu: *Zcu,
air: Air,
intern_pool: *InternPool,
tomb_bits: []usize,
@ -1119,9 +1130,13 @@ fn analyzeInst(
const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, extra.lhs, extra.rhs });
},
.shuffle => {
const extra = a.air.extraData(Air.Shuffle, inst_datas[@intFromEnum(inst)].ty_pl.payload).data;
return analyzeOperands(a, pass, data, inst, .{ extra.a, extra.b, .none });
.shuffle_one => {
const unwrapped = a.air.unwrapShuffleOne(a.zcu, inst);
return analyzeOperands(a, pass, data, inst, .{ unwrapped.operand, .none, .none });
},
.shuffle_two => {
const unwrapped = a.air.unwrapShuffleTwo(a.zcu, inst);
return analyzeOperands(a, pass, data, inst, .{ unwrapped.operand_a, unwrapped.operand_b, .none });
},
.reduce, .reduce_optimized => {
const reduce = inst_datas[@intFromEnum(inst)].reduce;

View File

@ -1,6 +1,7 @@
//! Verifies that Liveness information is valid.
gpa: std.mem.Allocator,
zcu: *Zcu,
air: Air,
liveness: Liveness,
live: LiveMap = .{},
@ -287,10 +288,13 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none });
},
.shuffle => {
const ty_pl = data[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
try self.verifyInstOperands(inst, .{ extra.a, extra.b, .none });
.shuffle_one => {
const unwrapped = self.air.unwrapShuffleOne(self.zcu, inst);
try self.verifyInstOperands(inst, .{ unwrapped.operand, .none, .none });
},
.shuffle_two => {
const unwrapped = self.air.unwrapShuffleTwo(self.zcu, inst);
try self.verifyInstOperands(inst, .{ unwrapped.operand_a, unwrapped.operand_b, .none });
},
.cmp_vector,
.cmp_vector_optimized,
@ -639,4 +643,5 @@ const log = std.log.scoped(.liveness_verify);
const Air = @import("../../Air.zig");
const Liveness = @import("../Liveness.zig");
const InternPool = @import("../../InternPool.zig");
const Zcu = @import("../../Zcu.zig");
const Verify = @This();

View File

@ -249,12 +249,22 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
if (!checkRef(extra.struct_operand, zcu)) return false;
},
.shuffle => {
const extra = air.extraData(Air.Shuffle, data.ty_pl.payload).data;
if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
if (!checkRef(extra.a, zcu)) return false;
if (!checkRef(extra.b, zcu)) return false;
if (!checkVal(Value.fromInterned(extra.mask), zcu)) return false;
.shuffle_one => {
const unwrapped = air.unwrapShuffleOne(zcu, inst);
if (!checkType(unwrapped.result_ty, zcu)) return false;
if (!checkRef(unwrapped.operand, zcu)) return false;
for (unwrapped.mask) |m| switch (m.unwrap()) {
.elem => {},
.value => |val| if (!checkVal(.fromInterned(val), zcu)) return false,
};
},
.shuffle_two => {
const unwrapped = air.unwrapShuffleTwo(zcu, inst);
if (!checkType(unwrapped.result_ty, zcu)) return false;
if (!checkRef(unwrapped.operand_a, zcu)) return false;
if (!checkRef(unwrapped.operand_b, zcu)) return false;
// No values to check because there are no comptime-known values other than undef
},
.cmpxchg_weak,

View File

@ -2529,6 +2529,7 @@ pub fn destroy(comp: *Compilation) void {
pub fn clearMiscFailures(comp: *Compilation) void {
comp.alloc_failure_occurred = false;
comp.link_diags.flags = .{};
for (comp.misc_failures.values()) |*value| {
value.deinit(comp.gpa);
}
@ -2795,7 +2796,6 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
if (anyErrors(comp)) {
// Skip flushing and keep source files loaded for error reporting.
comp.link_diags.flags = .{};
return;
}

View File

@ -4579,10 +4579,11 @@ pub const Index = enum(u32) {
undefined_type,
enum_literal_type,
ptr_usize_type,
ptr_const_comptime_int_type,
manyptr_u8_type,
manyptr_const_u8_type,
manyptr_const_u8_sentinel_0_type,
single_const_pointer_to_comptime_int_type,
slice_const_u8_type,
slice_const_u8_sentinel_0_type,
@ -4649,19 +4650,29 @@ pub const Index = enum(u32) {
/// `undefined` (untyped)
undef,
/// `@as(bool, undefined)`
undef_bool,
/// `@as(usize, undefined)`
undef_usize,
/// `@as(u1, undefined)`
undef_u1,
/// `0` (comptime_int)
zero,
/// `0` (usize)
/// `@as(usize, 0)`
zero_usize,
/// `0` (u8)
/// `@as(u1, 0)`
zero_u1,
/// `@as(u8, 0)`
zero_u8,
/// `1` (comptime_int)
one,
/// `1` (usize)
/// `@as(usize, 1)`
one_usize,
/// `1` (u8)
/// `@as(u1, 1)`
one_u1,
/// `@as(u8, 1)`
one_u8,
/// `4` (u8)
/// `@as(u8, 4)`
four_u8,
/// `-1` (comptime_int)
negative_one,
@ -5074,6 +5085,20 @@ pub const static_keys: [static_len]Key = .{
.{ .simple_type = .undefined },
.{ .simple_type = .enum_literal },
// *usize
.{ .ptr_type = .{
.child = .usize_type,
.flags = .{},
} },
// *const comptime_int
.{ .ptr_type = .{
.child = .comptime_int_type,
.flags = .{
.is_const = true,
},
} },
// [*]u8
.{ .ptr_type = .{
.child = .u8_type,
@ -5101,15 +5126,6 @@ pub const static_keys: [static_len]Key = .{
},
} },
// *const comptime_int
.{ .ptr_type = .{
.child = .comptime_int_type,
.flags = .{
.size = .one,
.is_const = true,
},
} },
// []const u8
.{ .ptr_type = .{
.child = .u8_type,
@ -5245,6 +5261,9 @@ pub const static_keys: [static_len]Key = .{
} },
.{ .simple_value = .undefined },
.{ .undef = .bool_type },
.{ .undef = .usize_type },
.{ .undef = .u1_type },
.{ .int = .{
.ty = .comptime_int_type,
@ -5256,6 +5275,11 @@ pub const static_keys: [static_len]Key = .{
.storage = .{ .u64 = 0 },
} },
.{ .int = .{
.ty = .u1_type,
.storage = .{ .u64 = 0 },
} },
.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = 0 },
@ -5271,17 +5295,21 @@ pub const static_keys: [static_len]Key = .{
.storage = .{ .u64 = 1 },
} },
// one_u8
.{ .int = .{
.ty = .u1_type,
.storage = .{ .u64 = 1 },
} },
.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = 1 },
} },
// four_u8
.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = 4 },
} },
// negative_one
.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .i64 = -1 },
@ -10482,7 +10510,7 @@ pub fn getCoerced(
.base_addr = .int,
.byte_offset = 0,
} }),
.len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
.len = .undef_usize,
} }),
};
},
@ -10601,7 +10629,7 @@ pub fn getCoerced(
.base_addr = .int,
.byte_offset = 0,
} }),
.len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
.len = .undef_usize,
} }),
},
else => |payload| try ip.getCoerced(gpa, tid, payload, new_ty),
@ -11847,10 +11875,11 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.null_type,
.undefined_type,
.enum_literal_type,
.ptr_usize_type,
.ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.vector_8_i8_type,
@ -11909,12 +11938,13 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.undef => .undefined_type,
.zero, .one, .negative_one => .comptime_int_type,
.zero_usize, .one_usize => .usize_type,
.undef_usize, .zero_usize, .one_usize => .usize_type,
.undef_u1, .zero_u1, .one_u1 => .u1_type,
.zero_u8, .one_u8, .four_u8 => .u8_type,
.void_value => .void_type,
.unreachable_value => .noreturn_type,
.null_value => .null_type,
.bool_true, .bool_false => .bool_type,
.undef_bool, .bool_true, .bool_false => .bool_type,
.empty_tuple => .empty_tuple_type,
// This optimization on tags is needed so that indexToKey can call
@ -12186,10 +12216,11 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.undefined_type => .undefined,
.enum_literal_type => .enum_literal,
.ptr_usize_type,
.ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
=> .pointer,
@ -12251,11 +12282,16 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
// values, not types
.undef => unreachable,
.undef_bool => unreachable,
.undef_usize => unreachable,
.undef_u1 => unreachable,
.zero => unreachable,
.zero_usize => unreachable,
.zero_u1 => unreachable,
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
.one_u1 => unreachable,
.one_u8 => unreachable,
.four_u8 => unreachable,
.negative_one => unreachable,

File diff suppressed because it is too large.

View File

@ -168,7 +168,7 @@ fn addWithOverflowScalar(
else => unreachable,
}
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{
.overflow_bit = try pt.undefValue(.u1),
.overflow_bit = .undef_u1,
.wrapped_result = try pt.undefValue(ty),
};
return intAddWithOverflow(sema, lhs, rhs, ty);
@ -229,7 +229,7 @@ fn subWithOverflowScalar(
else => unreachable,
}
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{
.overflow_bit = try pt.undefValue(.u1),
.overflow_bit = .undef_u1,
.wrapped_result = try pt.undefValue(ty),
};
return intSubWithOverflow(sema, lhs, rhs, ty);
@ -290,7 +290,7 @@ fn mulWithOverflowScalar(
else => unreachable,
}
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{
.overflow_bit = try pt.undefValue(.u1),
.overflow_bit = .undef_u1,
.wrapped_result = try pt.undefValue(ty),
};
return intMulWithOverflow(sema, lhs, rhs, ty);
@ -1043,7 +1043,7 @@ fn comptimeIntAdd(sema: *Sema, lhs: Value, rhs: Value) !Value {
fn intAddWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult {
switch (ty.toIntern()) {
.comptime_int_type => return .{
.overflow_bit = try sema.pt.intValue(.u1, 0),
.overflow_bit = .zero_u1,
.wrapped_result = try comptimeIntAdd(sema, lhs, rhs),
},
else => return intAddWithOverflowInner(sema, lhs, rhs, ty),
@ -1125,7 +1125,7 @@ fn comptimeIntSub(sema: *Sema, lhs: Value, rhs: Value) !Value {
fn intSubWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult {
switch (ty.toIntern()) {
.comptime_int_type => return .{
.overflow_bit = try sema.pt.intValue(.u1, 0),
.overflow_bit = .zero_u1,
.wrapped_result = try comptimeIntSub(sema, lhs, rhs),
},
else => return intSubWithOverflowInner(sema, lhs, rhs, ty),
@ -1211,7 +1211,7 @@ fn comptimeIntMul(sema: *Sema, lhs: Value, rhs: Value) !Value {
fn intMulWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult {
switch (ty.toIntern()) {
.comptime_int_type => return .{
.overflow_bit = try sema.pt.intValue(.u1, 0),
.overflow_bit = .zero_u1,
.wrapped_result = try comptimeIntMul(sema, lhs, rhs),
},
else => return intMulWithOverflowInner(sema, lhs, rhs, ty),

View File

@ -2641,10 +2641,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
if (enum_type.values.len == 0) {
const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = try pt.intern(.{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = 0 },
} }),
.int = (try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern(),
} });
return Value.fromInterned(only);
} else {
@ -3676,10 +3673,11 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.null_type,
.undefined_type,
.enum_literal_type,
.ptr_usize_type,
.ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.optional_noreturn_type,
@ -3691,9 +3689,11 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.undef => unreachable,
.zero => unreachable,
.zero_usize => unreachable,
.zero_u1 => unreachable,
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
.one_u1 => unreachable,
.one_u8 => unreachable,
.four_u8 => unreachable,
.negative_one => unreachable,
@ -4100,10 +4100,11 @@ pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type };
pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type };
pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type };
pub const ptr_usize: Type = .{ .ip_index = .ptr_usize_type };
pub const ptr_const_comptime_int: Type = .{ .ip_index = .ptr_const_comptime_int_type };
pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type };
pub const manyptr_const_u8: Type = .{ .ip_index = .manyptr_const_u8_type };
pub const manyptr_const_u8_sentinel_0: Type = .{ .ip_index = .manyptr_const_u8_sentinel_0_type };
pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_const_pointer_to_comptime_int_type };
pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type };
pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };

View File

@ -2895,19 +2895,25 @@ pub fn intValueBounds(val: Value, pt: Zcu.PerThread) !?[2]Value {
pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;
pub const zero_usize: Value = .{ .ip_index = .zero_usize };
pub const zero_u8: Value = .{ .ip_index = .zero_u8 };
pub const zero_comptime_int: Value = .{ .ip_index = .zero };
pub const one_comptime_int: Value = .{ .ip_index = .one };
pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one };
pub const undef: Value = .{ .ip_index = .undef };
pub const undef_bool: Value = .{ .ip_index = .undef_bool };
pub const undef_usize: Value = .{ .ip_index = .undef_usize };
pub const undef_u1: Value = .{ .ip_index = .undef_u1 };
pub const zero_comptime_int: Value = .{ .ip_index = .zero };
pub const zero_usize: Value = .{ .ip_index = .zero_usize };
pub const zero_u1: Value = .{ .ip_index = .zero_u1 };
pub const zero_u8: Value = .{ .ip_index = .zero_u8 };
pub const one_comptime_int: Value = .{ .ip_index = .one };
pub const one_usize: Value = .{ .ip_index = .one_usize };
pub const one_u1: Value = .{ .ip_index = .one_u1 };
pub const one_u8: Value = .{ .ip_index = .one_u8 };
pub const four_u8: Value = .{ .ip_index = .four_u8 };
pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one };
pub const @"void": Value = .{ .ip_index = .void_value };
pub const @"null": Value = .{ .ip_index = .null_value };
pub const @"false": Value = .{ .ip_index = .bool_false };
pub const @"true": Value = .{ .ip_index = .bool_true };
pub const @"unreachable": Value = .{ .ip_index = .unreachable_value };
pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type };
pub const @"null": Value = .{ .ip_index = .null_value };
pub const @"true": Value = .{ .ip_index = .bool_true };
pub const @"false": Value = .{ .ip_index = .bool_false };
pub const empty_tuple: Value = .{ .ip_index = .empty_tuple };
pub fn makeBool(x: bool) Value {

View File

@ -441,8 +441,7 @@ pub const BuiltinDecl = enum {
@"panic.castToNull",
@"panic.incorrectAlignment",
@"panic.invalidErrorCode",
@"panic.castTruncatedData",
@"panic.negativeToUnsigned",
@"panic.integerOutOfBounds",
@"panic.integerOverflow",
@"panic.shlOverflow",
@"panic.shrOverflow",
@ -518,8 +517,7 @@ pub const BuiltinDecl = enum {
.@"panic.castToNull",
.@"panic.incorrectAlignment",
.@"panic.invalidErrorCode",
.@"panic.castTruncatedData",
.@"panic.negativeToUnsigned",
.@"panic.integerOutOfBounds",
.@"panic.integerOverflow",
.@"panic.shlOverflow",
.@"panic.shrOverflow",
@ -585,8 +583,7 @@ pub const SimplePanicId = enum {
cast_to_null,
incorrect_alignment,
invalid_error_code,
cast_truncated_data,
negative_to_unsigned,
integer_out_of_bounds,
integer_overflow,
shl_overflow,
shr_overflow,
@ -609,8 +606,7 @@ pub const SimplePanicId = enum {
.cast_to_null => .@"panic.castToNull",
.incorrect_alignment => .@"panic.incorrectAlignment",
.invalid_error_code => .@"panic.invalidErrorCode",
.cast_truncated_data => .@"panic.castTruncatedData",
.negative_to_unsigned => .@"panic.negativeToUnsigned",
.integer_out_of_bounds => .@"panic.integerOutOfBounds",
.integer_overflow => .@"panic.integerOverflow",
.shl_overflow => .@"panic.shlOverflow",
.shr_overflow => .@"panic.shrOverflow",
@ -3829,26 +3825,8 @@ pub const Feature = enum {
is_named_enum_value,
error_set_has_value,
field_reordering,
/// When this feature is supported, the backend supports the following AIR instructions:
/// * `Air.Inst.Tag.add_safe`
/// * `Air.Inst.Tag.sub_safe`
/// * `Air.Inst.Tag.mul_safe`
/// * `Air.Inst.Tag.intcast_safe`
/// The motivation for this feature is that it makes AIR smaller, and makes it easier
/// to generate better machine code in the backends. All backends should migrate to
/// enabling this feature.
safety_checked_instructions,
/// If the backend supports running from another thread.
separate_thread,
/// If the backend supports the following AIR instructions with vector types:
/// * `Air.Inst.Tag.bit_and`
/// * `Air.Inst.Tag.bit_or`
/// * `Air.Inst.Tag.bitcast`
/// * `Air.Inst.Tag.float_from_int`
/// * `Air.Inst.Tag.fptrunc`
/// * `Air.Inst.Tag.int_from_float`
/// If not supported, Sema will scalarize the operation.
all_vector_instructions,
};
pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool {

View File

@ -1741,10 +1741,11 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: *A
return;
}
const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
try air.legalize(backend, zcu);
legalize: {
try air.legalize(pt, @import("../codegen.zig").legalizeFeatures(pt, nav_index) orelse break :legalize);
}
var liveness = try Air.Liveness.analyze(gpa, air.*, ip);
var liveness = try Air.Liveness.analyze(zcu, air.*, ip);
defer liveness.deinit(gpa);
if (build_options.enable_debug_extensions and comp.verbose_air) {
@ -1756,6 +1757,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: *A
if (std.debug.runtime_safety) {
var verify: Air.Liveness.Verify = .{
.gpa = gpa,
.zcu = zcu,
.air = air.*,
.liveness = liveness,
.intern_pool = ip,
@ -3022,7 +3024,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
// is unused so it just has to be a no-op.
sema.air_instructions.set(@intFromEnum(ptr_inst), .{
.tag = .alloc,
.data = .{ .ty = Type.single_const_pointer_to_comptime_int },
.data = .{ .ty = .ptr_const_comptime_int },
});
}
@ -3843,6 +3845,21 @@ pub fn nullValue(pt: Zcu.PerThread, opt_ty: Type) Allocator.Error!Value {
} }));
}
/// `ty` is an integer or a vector of integers.
pub fn overflowArithmeticTupleType(pt: Zcu.PerThread, ty: Type) !Type {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ov_ty: Type = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{
.len = ty.vectorLen(zcu),
.child = .u1_type,
}) else .u1;
const tuple_ty = try ip.getTupleType(zcu.gpa, pt.tid, .{
.types = &.{ ty.toIntern(), ov_ty.toIntern() },
.values = &.{ .none, .none },
});
return .fromInterned(tuple_ty);
}
pub fn smallestUnsignedInt(pt: Zcu.PerThread, max: u64) Allocator.Error!Type {
return pt.intType(.unsigned, Type.smallestUnsignedBits(max));
}
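The tuple built by `overflowArithmeticTupleType` mirrors what `@addWithOverflow` and friends return at the language level: the wrapped result first, then a `u1` (or vector of `u1`) overflow bit. The scalar case:

```zig
const std = @import("std");

test "overflow arithmetic result shape" {
    const r = @addWithOverflow(@as(u8, 250), @as(u8, 10));
    try std.testing.expectEqual(4, r[0]); // 260 wraps to 4 in u8
    try std.testing.expectEqual(1, r[1]); // overflow bit set
}
```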

View File

@ -40,6 +40,10 @@ const gp = abi.RegisterClass.gp;
const InnerError = CodeGenError || error{OutOfRegisters};
pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
return null;
}
gpa: Allocator,
pt: Zcu.PerThread,
air: Air,
@ -774,7 +778,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.error_name => try self.airErrorName(inst),
.splat => try self.airSplat(inst),
.select => try self.airSelect(inst),
.shuffle => try self.airShuffle(inst),
.shuffle_one => try self.airShuffleOne(inst),
.shuffle_two => try self.airShuffleTwo(inst),
.reduce => try self.airReduce(inst),
.aggregate_init => try self.airAggregateInit(inst),
.union_init => try self.airUnionInit(inst),
@ -2261,12 +2266,13 @@ fn shiftExact(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
_ = rhs_ty;
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO binary operations on vectors", .{}),
.vector => if (!rhs_ty.isVector(zcu))
return self.fail("TODO vector shift with scalar rhs", .{})
else
return self.fail("TODO binary operations on vectors", .{}),
.int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@ -2317,7 +2323,10 @@ fn shiftNormal(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO binary operations on vectors", .{}),
.vector => if (!rhs_ty.isVector(zcu))
return self.fail("TODO vector shift with scalar rhs", .{})
else
return self.fail("TODO binary operations on vectors", .{}),
.int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@ -2874,7 +2883,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu)));
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
.vector => if (!rhs_ty.isVector(zcu))
return self.fail("TODO implement vector shl_with_overflow with scalar rhs", .{})
else
return self.fail("TODO implement shl_with_overflow for vectors", .{}),
.int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@ -2993,8 +3005,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
}
fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else if (self.typeOf(bin_op.lhs).isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu))
return self.fail("TODO implement vector shl_sat with scalar rhs for {}", .{self.target.cpu.arch})
else
return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -6032,11 +6050,14 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) InnerError!void {
return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airShuffle(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ extra.a, extra.b, .none });
fn airShuffleOne(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airShuffleOne for {}", .{self.target.cpu.arch});
}
fn airShuffleTwo(self: *Self, inst: Air.Inst.Index) InnerError!void {
_ = inst;
return self.fail("TODO implement airShuffleTwo for {}", .{self.target.cpu.arch});
}
fn airReduce(self: *Self, inst: Air.Inst.Index) InnerError!void {

View File

@ -41,6 +41,10 @@ const gp = abi.RegisterClass.gp;
const InnerError = CodeGenError || error{OutOfRegisters};
pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
return null;
}
gpa: Allocator,
pt: Zcu.PerThread,
air: Air,
@ -763,7 +767,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.error_name => try self.airErrorName(inst),
.splat => try self.airSplat(inst),
.select => try self.airSelect(inst),
.shuffle => try self.airShuffle(inst),
.shuffle_one => try self.airShuffleOne(inst),
.shuffle_two => try self.airShuffleTwo(inst),
.reduce => try self.airReduce(inst),
.aggregate_init => try self.airAggregateInit(inst),
.union_init => try self.airUnionInit(inst),
@ -1857,7 +1862,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
.vector => if (!rhs_ty.isVector(zcu))
return self.fail("TODO implement vector shl_with_overflow with scalar rhs", .{})
else
return self.fail("TODO implement shl_with_overflow for vectors", .{}),
.int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@ -1978,8 +1986,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else if (self.typeOf(bin_op.lhs).isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu))
return self.fail("TODO implement vector shl_sat with scalar rhs for {}", .{self.target.cpu.arch})
else
return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -3788,7 +3802,10 @@ fn shiftExact(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.vector => if (!rhs_ty.isVector(zcu))
return self.fail("TODO ARM vector shift with scalar rhs", .{})
else
return self.fail("TODO ARM binary operations on vectors", .{}),
.int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@ -3828,7 +3845,10 @@ fn shiftNormal(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.vector => if (!rhs_ty.isVector(zcu))
return self.fail("TODO ARM vector shift with scalar rhs", .{})
else
return self.fail("TODO ARM binary operations on vectors", .{}),
.int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@ -6002,10 +6022,14 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airShuffle for arm", .{});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
fn airShuffleOne(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airShuffleOne for arm", .{});
}
fn airShuffleTwo(self: *Self, inst: Air.Inst.Index) !void {
_ = inst;
return self.fail("TODO implement airShuffleTwo for arm", .{});
}
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {

View File

@ -10,6 +10,10 @@ const Zcu = @import("../../Zcu.zig");
const assert = std.debug.assert;
const log = std.log.scoped(.codegen);
pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
return null;
}
pub fn generate(
bin_file: *link.File,
pt: Zcu.PerThread,

View File

@ -51,6 +51,15 @@ const Instruction = encoding.Instruction;
const InnerError = CodeGenError || error{OutOfRegisters};
pub fn legalizeFeatures(_: *const std.Target) *const Air.Legalize.Features {
return comptime &.initMany(&.{
.expand_intcast_safe,
.expand_add_safe,
.expand_sub_safe,
.expand_mul_safe,
});
}
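Requesting these features tells `Air.Legalize` to expand the safety-checked instructions this backend does not lower directly. Conceptually, an `add_safe` becomes an overflowing add plus a panic branch; a sketch in source-level Zig (not actual AIR):

```zig
fn addSafe(a: u32, b: u32) u32 {
    const r = @addWithOverflow(a, b);
    if (r[1] != 0) @panic("integer overflow");
    return r[0];
}
```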
pt: Zcu.PerThread,
air: Air,
liveness: Air.Liveness,
@ -1577,7 +1586,8 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.error_name => try func.airErrorName(inst),
.splat => try func.airSplat(inst),
.select => try func.airSelect(inst),
.shuffle => try func.airShuffle(inst),
.shuffle_one => try func.airShuffleOne(inst),
.shuffle_two => try func.airShuffleTwo(inst),
.reduce => try func.airReduce(inst),
.aggregate_init => try func.airAggregateInit(inst),
.union_init => try func.airUnionInit(inst),
@ -2764,6 +2774,7 @@ fn genBinOp(
.shl,
.shl_exact,
=> {
if (lhs_ty.isVector(zcu) and !rhs_ty.isVector(zcu)) return func.fail("TODO: vector shift with scalar rhs", .{});
if (bit_size > 64) return func.fail("TODO: genBinOp shift > 64 bits, {}", .{bit_size});
try func.truncateRegister(rhs_ty, rhs_reg);
@ -3248,8 +3259,14 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
}
fn airShlWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
const zcu = func.pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShlWithOverflow", .{});
const result: MCValue = if (func.liveness.isUnused(inst))
.unreach
else if (func.typeOf(bin_op.lhs).isVector(zcu) and !func.typeOf(bin_op.rhs).isVector(zcu))
return func.fail("TODO implement vector airShlWithOverflow with scalar rhs", .{})
else
return func.fail("TODO implement airShlWithOverflow", .{});
return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -3266,8 +3283,14 @@ fn airMulSat(func: *Func, inst: Air.Inst.Index) !void {
}
fn airShlSat(func: *Func, inst: Air.Inst.Index) !void {
const zcu = func.pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShlSat", .{});
const result: MCValue = if (func.liveness.isUnused(inst))
.unreach
else if (func.typeOf(bin_op.lhs).isVector(zcu) and !func.typeOf(bin_op.rhs).isVector(zcu))
return func.fail("TODO implement vector airShlSat with scalar rhs", .{})
else
return func.fail("TODO implement airShlSat", .{});
return func.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -8008,10 +8031,14 @@ fn airSelect(func: *Func, inst: Air.Inst.Index) !void {
return func.finishAir(inst, result, .{ pl_op.operand, extra.lhs, extra.rhs });
}
fn airShuffle(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement airShuffle for riscv64", .{});
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
fn airShuffleOne(func: *Func, inst: Air.Inst.Index) !void {
_ = inst;
return func.fail("TODO implement airShuffleOne for riscv64", .{});
}
fn airShuffleTwo(func: *Func, inst: Air.Inst.Index) !void {
_ = inst;
return func.fail("TODO implement airShuffleTwo for riscv64", .{});
}
fn airReduce(func: *Func, inst: Air.Inst.Index) !void {

View File

@ -41,6 +41,10 @@ const Self = @This();
const InnerError = CodeGenError || error{OutOfRegisters};
pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
return null;
}
const RegisterView = enum(u1) {
caller,
callee,
@ -617,7 +621,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.error_name => try self.airErrorName(inst),
.splat => try self.airSplat(inst),
.select => @panic("TODO try self.airSelect(inst)"),
.shuffle => @panic("TODO try self.airShuffle(inst)"),
.shuffle_one => @panic("TODO try self.airShuffleOne(inst)"),
.shuffle_two => @panic("TODO try self.airShuffleTwo(inst)"),
.reduce => @panic("TODO try self.airReduce(inst)"),
.aggregate_init => try self.airAggregateInit(inst),
.union_init => try self.airUnionInit(inst),
@ -2270,8 +2275,14 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
}
fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else if (self.typeOf(bin_op.lhs).isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu))
return self.fail("TODO implement vector shl_sat with scalar rhs for {}", .{self.target.cpu.arch})
else
return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -2287,7 +2298,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
.vector => if (!rhs_ty.isVector(zcu))
return self.fail("TODO implement vector shl_with_overflow with scalar rhs", .{})
else
return self.fail("TODO implement mul_with_overflow for vectors", .{}),
.int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@ -3002,7 +3016,10 @@ fn binOp(
// Truncate if necessary
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO binary operations on vectors", .{}),
.vector => if (!rhs_ty.isVector(zcu))
return self.fail("TODO vector shift with scalar rhs", .{})
else
return self.fail("TODO binary operations on vectors", .{}),
.int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@ -3024,7 +3041,10 @@ fn binOp(
.shr_exact,
=> {
switch (lhs_ty.zigTypeTag(zcu)) {
.vector => return self.fail("TODO binary operations on vectors", .{}),
.vector => if (!rhs_ty.isVector(zcu))
return self.fail("TODO vector shift with scalar rhs", .{})
else
return self.fail("TODO binary operations on vectors", .{}),
.int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {

View File

@ -31,6 +31,15 @@ const libcFloatSuffix = target_util.libcFloatSuffix;
const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev;
const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev;
pub fn legalizeFeatures(_: *const std.Target) *const Air.Legalize.Features {
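// As with the other self-hosted backends, request that Legalize expand
// safety-checked casts and arithmetic into explicit checks up front.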
return comptime &.initMany(&.{
.expand_intcast_safe,
.expand_add_safe,
.expand_sub_safe,
.expand_mul_safe,
});
}
/// Reference to the function declaration the code
/// section belongs to
owner_nav: InternPool.Nav.Index,
@ -1995,7 +2004,8 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.ret_load => cg.airRetLoad(inst),
.splat => cg.airSplat(inst),
.select => cg.airSelect(inst),
.shuffle => cg.airShuffle(inst),
.shuffle_one => cg.airShuffleOne(inst),
.shuffle_two => cg.airShuffleTwo(inst),
.reduce => cg.airReduce(inst),
.aggregate_init => cg.airAggregateInit(inst),
.union_init => cg.airUnionInit(inst),
@ -2638,6 +2648,10 @@ fn airBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => result: {
if (lhs_ty.isVector(zcu) and !rhs_ty.isVector(zcu)) {
return cg.fail("TODO: implement vector '{s}' with scalar rhs", .{@tagName(op)});
}
const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse {
return cg.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
@ -3055,8 +3069,12 @@ fn airWrapBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const lhs_ty = cg.typeOf(bin_op.lhs);
const rhs_ty = cg.typeOf(bin_op.rhs);
if (lhs_ty.zigTypeTag(zcu) == .vector or rhs_ty.zigTypeTag(zcu) == .vector) {
return cg.fail("TODO: Implement wrapping arithmetic for vectors", .{});
if (lhs_ty.isVector(zcu)) {
if ((op == .shr or op == .shl) and !rhs_ty.isVector(zcu)) {
return cg.fail("TODO: implement wrapping vector '{s}' with scalar rhs", .{@tagName(op)});
} else {
return cg.fail("TODO: implement wrapping '{s}' for vectors", .{@tagName(op)});
}
}
// For certain operations, such as shifting, the types are different.
@ -5160,66 +5178,105 @@ fn airSelect(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return cg.fail("TODO: Implement wasm airSelect", .{});
}
fn airShuffle(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airShuffleOne(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = cg.pt;
const zcu = pt.zcu;
const inst_ty = cg.typeOfIndex(inst);
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try cg.resolveInst(extra.a);
const b = try cg.resolveInst(extra.b);
const mask = Value.fromInterned(extra.mask);
const mask_len = extra.mask_len;
const unwrapped = cg.air.unwrapShuffleOne(zcu, inst);
const result_ty = unwrapped.result_ty;
const mask = unwrapped.mask;
const operand = try cg.resolveInst(unwrapped.operand);
const child_ty = inst_ty.childType(zcu);
const elem_size = child_ty.abiSize(zcu);
const elem_ty = result_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
// TODO: One of them could be by ref; handle in loop
if (isByRef(cg.typeOf(extra.a), zcu, cg.target) or isByRef(inst_ty, zcu, cg.target)) {
const result = try cg.allocStack(inst_ty);
// TODO: this function could have an `i8x16_shuffle` fast path like `airShuffleTwo` if we were
// to lower the comptime-known operands to a non-by-ref vector value.
for (0..mask_len) |index| {
const value = (try mask.elemValue(pt, index)).toSignedInt(zcu);
// TODO: this is incorrect if either operand or the result is *not* by-ref, which is possible.
// I tried to fix it, but I couldn't make much sense of how this backend handles memory.
if (!isByRef(result_ty, zcu, cg.target) or
!isByRef(cg.typeOf(unwrapped.operand), zcu, cg.target)) return cg.fail("TODO: handle mixed by-ref shuffle", .{});
try cg.emitWValue(result);
const loaded = if (value >= 0)
try cg.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value)))
else
try cg.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value)));
try cg.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index)));
const dest_alloc = try cg.allocStack(result_ty);
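// Write each result element individually: a load from the runtime operand,
// or the comptime-known value baked into the mask.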
for (mask, 0..) |mask_elem, out_idx| {
try cg.emitWValue(dest_alloc);
const elem_val = switch (mask_elem.unwrap()) {
.elem => |idx| try cg.load(operand, elem_ty, @intCast(elem_size * idx)),
.value => |val| try cg.lowerConstant(.fromInterned(val), elem_ty),
};
try cg.store(.stack, elem_val, elem_ty, @intCast(dest_alloc.offset() + elem_size * out_idx));
}
return cg.finishAir(inst, dest_alloc, &.{unwrapped.operand});
}
return cg.finishAir(inst, result, &.{ extra.a, extra.b });
} else {
var operands = [_]u32{
@intFromEnum(std.wasm.SimdOpcode.i8x16_shuffle),
} ++ [1]u32{undefined} ** 4;
fn airShuffleTwo(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = cg.pt;
const zcu = pt.zcu;
var lanes = mem.asBytes(operands[1..]);
for (0..@as(usize, @intCast(mask_len))) |index| {
const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu);
const base_index = if (mask_elem >= 0)
@as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem))
else
16 + @as(u8, @intCast(@as(i64, @intCast(elem_size)) * ~mask_elem));
const unwrapped = cg.air.unwrapShuffleTwo(zcu, inst);
const result_ty = unwrapped.result_ty;
const mask = unwrapped.mask;
const operand_a = try cg.resolveInst(unwrapped.operand_a);
const operand_b = try cg.resolveInst(unwrapped.operand_b);
for (0..@as(usize, @intCast(elem_size))) |byte_offset| {
lanes[index * @as(usize, @intCast(elem_size)) + byte_offset] = base_index + @as(u8, @intCast(byte_offset));
const a_ty = cg.typeOf(unwrapped.operand_a);
const b_ty = cg.typeOf(unwrapped.operand_b);
const elem_ty = result_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
// WASM has `i8x16_shuffle`, which we can apply if the element type bit size is a multiple of 8
// and the input and output vectors have a bit size of 128 (and are hence not by-ref). Otherwise,
// we fall back to a naive loop lowering.
if (!isByRef(a_ty, zcu, cg.target) and
!isByRef(b_ty, zcu, cg.target) and
!isByRef(result_ty, zcu, cg.target) and
elem_ty.bitSize(zcu) % 8 == 0)
{
var lane_map: [16]u8 align(4) = undefined;
const lanes_per_elem: usize = @intCast(elem_ty.bitSize(zcu) / 8);
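// i8x16_shuffle lane indices 0-15 select bytes from operand a and 16-31 from
// operand b, so expand each mask element into its constituent byte lanes.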
for (mask, 0..) |mask_elem, out_idx| {
const out_first_lane = out_idx * lanes_per_elem;
const in_first_lane = switch (mask_elem.unwrap()) {
.a_elem => |i| i * lanes_per_elem,
.b_elem => |i| i * lanes_per_elem + 16,
.undef => 0, // doesn't matter
};
for (lane_map[out_first_lane..][0..lanes_per_elem], in_first_lane..) |*out, in| {
out.* = @intCast(in);
}
}
try cg.emitWValue(a);
try cg.emitWValue(b);
try cg.emitWValue(operand_a);
try cg.emitWValue(operand_b);
const extra_index = cg.extraLen();
try cg.mir_extra.appendSlice(cg.gpa, &operands);
try cg.mir_extra.appendSlice(cg.gpa, &.{
@intFromEnum(std.wasm.SimdOpcode.i8x16_shuffle),
@bitCast(lane_map[0..4].*),
@bitCast(lane_map[4..8].*),
@bitCast(lane_map[8..12].*),
@bitCast(lane_map[12..].*),
});
try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return cg.finishAir(inst, .stack, &.{ extra.a, extra.b });
return cg.finishAir(inst, .stack, &.{ unwrapped.operand_a, unwrapped.operand_b });
}
// TODO: this is incorrect if either operand or the result is *not* by-ref, which is possible.
// I tried to fix it, but I couldn't make much sense of how this backend handles memory.
if (!isByRef(result_ty, zcu, cg.target) or
!isByRef(a_ty, zcu, cg.target) or
!isByRef(b_ty, zcu, cg.target)) return cg.fail("TODO: handle mixed by-ref shuffle", .{});
const dest_alloc = try cg.allocStack(result_ty);
for (mask, 0..) |mask_elem, out_idx| {
try cg.emitWValue(dest_alloc);
const elem_val = switch (mask_elem.unwrap()) {
.a_elem => |idx| try cg.load(operand_a, elem_ty, @intCast(elem_size * idx)),
.b_elem => |idx| try cg.load(operand_b, elem_ty, @intCast(elem_size * idx)),
.undef => try cg.emitUndefined(elem_ty),
};
try cg.store(.stack, elem_val, elem_ty, @intCast(dest_alloc.offset() + elem_size * out_idx));
}
return cg.finishAir(inst, dest_alloc, &.{ unwrapped.operand_a, unwrapped.operand_b });
}
fn airReduce(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
@ -6067,13 +6124,17 @@ fn airShlWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = cg.typeOf(extra.lhs);
const rhs_ty = cg.typeOf(extra.rhs);
if (ty.zigTypeTag(zcu) == .vector) {
return cg.fail("TODO: Implement overflow arithmetic for vectors", .{});
if (ty.isVector(zcu)) {
if (!rhs_ty.isVector(zcu)) {
return cg.fail("TODO: implement vector 'shl_with_overflow' with scalar rhs", .{});
} else {
return cg.fail("TODO: implement vector 'shl_with_overflow'", .{});
}
}
const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return cg.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
return cg.fail("TODO: implement 'shl_with_overflow' for integer bitsize: {d}", .{int_info.bits});
};
// Ensure rhs is coerced to lhs as they must have the same WebAssembly types
@ -6994,6 +7055,11 @@ fn airShlSat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = cg.pt;
const zcu = pt.zcu;
if (cg.typeOf(bin_op.lhs).isVector(zcu) and !cg.typeOf(bin_op.rhs).isVector(zcu)) {
return cg.fail("TODO: implement vector 'shl_sat' with scalar rhs", .{});
}
const ty = cg.typeOfIndex(inst);
const int_info = ty.intInfo(zcu);
const is_signed = int_info.signedness == .signed;

View File

@ -32,10 +32,79 @@ const FrameIndex = bits.FrameIndex;
const InnerError = codegen.CodeGenError || error{OutOfRegisters};
pub const legalize_features: Air.Legalize.Features = .{
.remove_shift_vector_rhs_splat = false,
pub fn legalizeFeatures(target: *const std.Target) *const Air.Legalize.Features {
@setEvalBranchQuota(1_200);
return switch (target.ofmt == .coff) {
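// COFF selects `use_old`, enabling the full scalarize set; other formats
// only scalarize the operations marked `true` below.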
inline false, true => |use_old| comptime &.init(.{
.scalarize_add = use_old,
.scalarize_add_sat = use_old,
.scalarize_sub = use_old,
.scalarize_sub_sat = use_old,
.scalarize_mul = use_old,
.scalarize_mul_wrap = use_old,
.scalarize_mul_sat = true,
.scalarize_div_float = use_old,
.scalarize_div_float_optimized = use_old,
.scalarize_div_trunc = use_old,
.scalarize_div_trunc_optimized = use_old,
.scalarize_div_floor = use_old,
.scalarize_div_floor_optimized = use_old,
.scalarize_div_exact = use_old,
.scalarize_div_exact_optimized = use_old,
.scalarize_max = use_old,
.scalarize_min = use_old,
.scalarize_add_with_overflow = true,
.scalarize_sub_with_overflow = true,
.scalarize_mul_with_overflow = true,
.scalarize_shl_with_overflow = true,
.scalarize_bit_and = use_old,
.scalarize_bit_or = use_old,
.scalarize_shr = true,
.scalarize_shr_exact = true,
.scalarize_shl = true,
.scalarize_shl_exact = true,
.scalarize_shl_sat = true,
.scalarize_xor = use_old,
.scalarize_not = use_old,
.scalarize_clz = use_old,
.scalarize_ctz = true,
.scalarize_popcount = true,
.scalarize_byte_swap = true,
.scalarize_bit_reverse = true,
.scalarize_sin = use_old,
.scalarize_cos = use_old,
.scalarize_tan = use_old,
.scalarize_exp = use_old,
.scalarize_exp2 = use_old,
.scalarize_log = use_old,
.scalarize_log2 = use_old,
.scalarize_log10 = use_old,
.scalarize_abs = use_old,
.scalarize_floor = use_old,
.scalarize_ceil = use_old,
.scalarize_trunc_float = use_old,
.scalarize_cmp_vector = true,
.scalarize_cmp_vector_optimized = true,
.scalarize_fptrunc = use_old,
.scalarize_fpext = use_old,
.scalarize_intcast = use_old,
.scalarize_int_from_float = use_old,
.scalarize_int_from_float_optimized = use_old,
.scalarize_float_from_int = use_old,
.scalarize_shuffle_one = true,
.scalarize_shuffle_two = true,
.scalarize_select = true,
.scalarize_mul_add = use_old,
.unsplat_shift_rhs = false,
.reduce_one_elem_to_bitcast = true,
};
.expand_intcast_safe = true,
.expand_add_safe = true,
.expand_sub_safe = true,
.expand_mul_safe = true,
}),
};
}
/// Set this to `false` to uncover Sema OPV bugs.
/// https://github.com/ziglang/zig/issues/22419
@ -218,7 +287,7 @@ pub const MCValue = union(enum) {
/// Payload is a frame address.
lea_frame: bits.FrameAddr,
/// Supports integer_per_element abi
elementwise_regs_then_frame: packed struct { regs: u3, frame_off: i29, frame_index: FrameIndex },
elementwise_args: packed struct { regs: u3, frame_off: i29, frame_index: FrameIndex },
/// This indicates that we have already allocated a frame index for this instruction,
/// but it has not been spilled there yet in the current control flow.
/// Payload is a frame index.
@ -240,7 +309,7 @@ pub const MCValue = union(enum) {
.lea_direct,
.lea_got,
.lea_frame,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> false,
@ -355,7 +424,7 @@ pub const MCValue = union(enum) {
.lea_direct,
.lea_got,
.lea_frame,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable, // not in memory
@ -389,7 +458,7 @@ pub const MCValue = union(enum) {
.load_got,
.load_frame,
.load_symbol,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable, // not dereferenceable
@ -409,7 +478,7 @@ pub const MCValue = union(enum) {
.unreach,
.dead,
.undef,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable, // not valid
@ -463,7 +532,7 @@ pub const MCValue = union(enum) {
.load_got,
.lea_got,
.lea_frame,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.lea_symbol,
=> unreachable,
@ -547,7 +616,7 @@ pub const MCValue = union(enum) {
.load_got => |pl| try writer.print("[got:{d}]", .{pl}),
.lea_got => |pl| try writer.print("got:{d}", .{pl}),
.load_frame => |pl| try writer.print("[{} + 0x{x}]", .{ pl.index, pl.off }),
.elementwise_regs_then_frame => |pl| try writer.print("elementwise:{d}:[{} + 0x{x}]", .{
.elementwise_args => |pl| try writer.print("elementwise:{d}:[{} + 0x{x}]", .{
pl.regs, pl.frame_index, pl.frame_off,
}),
.lea_frame => |pl| try writer.print("{} + 0x{x}", .{ pl.index, pl.off }),
@ -580,7 +649,7 @@ const InstTracking = struct {
.lea_symbol,
=> result,
.dead,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable,
@ -689,7 +758,7 @@ const InstTracking = struct {
.register_overflow,
.register_mask,
.indirect,
.elementwise_regs_then_frame,
.elementwise_args,
.air_ref,
=> unreachable,
}
@ -2239,11 +2308,17 @@ fn gen(self: *CodeGen) InnerError!void {
try self.genBody(self.air.getMainBody());
const epilogue = if (self.epilogue_relocs.items.len > 0) epilogue: {
const epilogue_relocs_last_index = self.epilogue_relocs.items.len - 1;
for (if (self.epilogue_relocs.items[epilogue_relocs_last_index] == self.mir_instructions.len - 1) epilogue_relocs: {
_ = self.mir_instructions.pop();
break :epilogue_relocs self.epilogue_relocs.items[0..epilogue_relocs_last_index];
} else self.epilogue_relocs.items) |epilogue_reloc| self.performReloc(epilogue_reloc);
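// A jump patched to point at an epilogue that immediately follows it is
// redundant: peel such relocs off the end and replace the jumps they would
// patch with dead pseudo instructions.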
var last_inst: Mir.Inst.Index = @intCast(self.mir_instructions.len - 1);
while (self.epilogue_relocs.getLastOrNull() == last_inst) {
self.epilogue_relocs.items.len -= 1;
self.mir_instructions.set(last_inst, .{
.tag = .pseudo,
.ops = .pseudo_dead_none,
.data = undefined,
});
last_inst -= 1;
}
for (self.epilogue_relocs.items) |epilogue_reloc| self.performReloc(epilogue_reloc);
if (self.debug_output != .none) try self.asmPseudo(.pseudo_dbg_epilogue_begin_none);
const backpatch_stack_dealloc = try self.asmPlaceholder();
@ -2430,7 +2505,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[@intFromEnum(inst)]) {
// zig fmt: off
.select => try cg.airSelect(inst),
.shuffle => try cg.airShuffle(inst),
.shuffle_one, .shuffle_two => @panic("x86_64 TODO: shuffle_one/shuffle_two"),
// zig fmt: on
.arg => if (cg.debug_output != .none) {
@ -5714,7 +5789,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
},
.extra_temps = .{
.{ .type = .i64, .kind = .{ .rc = .general_purpose } },
.{ .type = .i64, .kind = .{ .mut_rc = .{ .ref = .src1, .rc = .general_purpose } } },
.{ .type = .i64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
@ -63352,14 +63427,14 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
defer assert(cg.loops.remove(inst));
try cg.genBodyBlock(@ptrCast(cg.air.extra.items[block.end..][0..block.data.body_len]));
},
.repeat => if (use_old) try cg.airRepeat(inst) else {
.repeat => {
const repeat = air_datas[@intFromEnum(inst)].repeat;
const loop = cg.loops.get(repeat.loop_inst).?;
try cg.restoreState(loop.state, &.{}, .{
.emit_instructions = true,
.update_tracking = false,
.resurrect = false,
.close_scope = true,
.close_scope = false,
});
_ = try cg.asmJmpReloc(loop.target);
},
@ -77234,11 +77309,27 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
},
}
},
.int => res[0] = ops[0].cmpInts(cmp_op, &ops[1], cg) catch |err| break :err err,
.int => {
switch (ty.zigTypeTag(zcu)) {
else => {},
.@"struct", .@"union" => {
assert(ty.containerLayout(zcu) == .@"packed");
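// Packed aggregates compare as their backing integers but may carry stale
// bits above their bit width, so wrap each operand to its ABI width first.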
for (&ops) |*op| op.wrapInt(cg) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} wrap {} {}", .{
@tagName(air_tag),
ty.fmt(pt),
op.tracking(cg),
}),
else => |e| return e,
};
},
}
res[0] = ops[0].cmpInts(cmp_op, &ops[1], cg) catch |err| break :err err;
},
}) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} {} {} {}", .{
@tagName(air_tag),
cg.typeOf(bin_op.lhs).fmt(pt),
ty.fmt(pt),
ops[0].tracking(cg),
ops[1].tracking(cg),
}),
@ -92468,7 +92559,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
.{ ._, ._r, .sa, .tmp0q, .ui(63), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
@ -92500,7 +92591,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ },
.{ ._, ._l, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._r, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
@ -92534,7 +92625,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -8), ._, ._ },
.{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -8), ._, ._ },
.{ ._, ._l, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._r, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
@ -92595,7 +92686,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.dst0, .add_bit_size_rem_64), ._, ._ },
.{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_size, -16), .tmp2q, ._ },
.{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_dst0_size, -16), .tmp2q, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp2q, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ },
} },
@ -92627,7 +92718,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.dst0, .add_bit_size_rem_64), ._, ._ },
.{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_size, -8), .tmp2q, ._ },
.{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_dst0_size, -8), .tmp2q, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp2q, ._, ._ },
} },
}, .{
@ -92658,7 +92749,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.dst0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
.{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ },
} },
@ -92690,7 +92781,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.dst0, .add_umax), ._, ._ },
.{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_size, -8), ._, ._ },
.{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_dst0_size, -8), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
} },
}, .{
@ -162356,6 +162447,136 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.each = .{ .once = &.{
.{ ._, ._, .mov, .leasi(.src0w, .@"2", .src1), .src2w, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, .vp_w, .extr, .leaa(.src0w, .add_src0_elem_size_mul_src1), .src2x, .ui(0), ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, .p_w, .extr, .leaa(.src0w, .add_src0_elem_size_mul_src1), .src2x, .ui(0), ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .to_sse } },
},
.extra_temps = .{
.{ .type = .f16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.each = .{ .once = &.{
.{ ._, .p_w, .extr, .tmp0d, .src2x, .ui(0), ._ },
.{ ._, ._, .mov, .leaa(.src0w, .add_src0_elem_size_mul_src1), .tmp0w, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .to_sse } },
},
.extra_temps = .{
.{ .type = .f32, .kind = .mem },
.{ .type = .f16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.each = .{ .once = &.{
.{ ._, ._ss, .mov, .mem(.tmp0d), .src2x, ._, ._ },
.{ ._, ._, .mov, .tmp1d, .mem(.tmp0d), ._, ._ },
.{ ._, ._, .mov, .leaa(.src0w, .add_src0_elem_size_mul_src1), .tmp1w, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, .vp_w, .extr, .leasi(.src0w, .@"2", .src1), .src2x, .ui(0), ._ },
} },
}, .{
.required_features = .{ .sse4_1, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, .p_w, .extr, .leasi(.src0w, .@"2", .src1), .src2x, .ui(0), ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .to_sse } },
},
.extra_temps = .{
.{ .type = .f16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.each = .{ .once = &.{
.{ ._, .p_w, .extr, .tmp0d, .src2x, .ui(0), ._ },
.{ ._, ._, .mov, .leasi(.src0w, .@"2", .src1), .tmp0w, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .word } },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .to_sse } },
},
.extra_temps = .{
.{ .type = .f32, .kind = .mem },
.{ .type = .f16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.each = .{ .once = &.{
.{ ._, ._ss, .mov, .mem(.tmp0d), .src2x, ._, ._ },
.{ ._, ._, .mov, .tmp1d, .mem(.tmp0d), ._, ._ },
.{ ._, ._, .mov, .leasi(.src0w, .@"2", .src1), .tmp1w, ._, ._ },
} },
}, .{
.src_constraints = .{ .any, .any, .{ .int = .dword } },
.patterns = &.{
@ -162375,29 +162596,119 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .leasi(.src0d, .@"4", .src1), .src2d, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .int = .qword }, .any },
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .dword } },
.patterns = &.{
.{ .src = .{ .to_mem, .simm32, .simm32 } },
.{ .src = .{ .to_mem, .simm32, .to_gpr } },
.{ .src = .{ .to_gpr, .simm32, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, .v_ss, .mov, .leaa(.src0d, .add_src0_elem_size_mul_src1), .src2x, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .dword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, ._ss, .mov, .leaa(.src0d, .add_src0_elem_size_mul_src1), .src2x, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .dword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, .v_ss, .mov, .leasi(.src0d, .@"4", .src1), .src2x, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .dword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, ._ss, .mov, .leasi(.src0d, .@"4", .src1), .src2x, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.src_constraints = .{ .any, .any, .{ .int = .qword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .simm32 } },
.{ .src = .{ .to_gpr, .simm32, .to_gpr } },
},
.each = .{ .once = &.{
.{ ._, ._, .mov, .leaa(.src0q, .add_src0_elem_size_mul_src1), .src2q, ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .int = .qword }, .any },
.src_constraints = .{ .any, .any, .{ .int = .qword } },
.patterns = &.{
.{ .src = .{ .to_mem, .to_gpr, .simm32 } },
.{ .src = .{ .to_mem, .to_gpr, .to_gpr } },
.{ .src = .{ .to_gpr, .to_gpr, .simm32 } },
.{ .src = .{ .to_gpr, .to_gpr, .to_gpr } },
},
.each = .{ .once = &.{
.{ ._, ._, .mov, .leasi(.src0q, .@"8", .src1), .src2q, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .qword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, .v_sd, .mov, .leaa(.src0q, .add_src0_elem_size_mul_src1), .src2x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .qword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, ._sd, .mov, .leaa(.src0q, .add_src0_elem_size_mul_src1), .src2x, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .qword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, ._ps, .movl, .leaa(.src0q, .add_src0_elem_size_mul_src1), .src2x, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .qword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, .v_sd, .mov, .leasi(.src0q, .@"8", .src1), .src2x, ._, ._ },
} },
}, .{
.required_features = .{ .sse2, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .qword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, ._sd, .mov, .leasi(.src0q, .@"8", .src1), .src2x, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .any, .any, .{ .float = .qword } },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .to_sse } },
},
.each = .{ .once = &.{
.{ ._, ._ps, .movl, .leasi(.src0q, .@"8", .src1), .src2x, ._, ._ },
} },
} }) catch |err| switch (err) {
error.SelectFailed => {
const elem_size = cg.typeOf(bin_op.rhs).abiSize(zcu);
while (try ops[0].toBase(false, cg) or
while (try ops[0].toRegClass(true, .general_purpose, cg) or
try ops[1].toRegClass(true, .general_purpose, cg))
{}
const base_reg = ops[0].tracking(cg).short.register.to64();
@ -162410,11 +162721,10 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
rhs_reg,
.u(elem_size),
);
try cg.asmRegisterMemory(
.{ ._, .lea },
base_reg,
try ops[0].tracking(cg).short.mem(cg, .{ .index = rhs_reg }),
);
try cg.asmRegisterMemory(.{ ._, .lea }, base_reg, .{
.base = .{ .reg = base_reg },
.mod = .{ .rm = .{ .index = rhs_reg } },
});
} else if (elem_size > 8) {
try cg.spillEflagsIfOccupied();
try cg.asmRegisterImmediate(
@ -162422,20 +162732,18 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
rhs_reg,
.u(std.math.log2_int(u64, elem_size)),
);
try cg.asmRegisterMemory(
.{ ._, .lea },
base_reg,
try ops[0].tracking(cg).short.mem(cg, .{ .index = rhs_reg }),
);
} else try cg.asmRegisterMemory(
.{ ._, .lea },
base_reg,
try ops[0].tracking(cg).short.mem(cg, .{
try cg.asmRegisterMemory(.{ ._, .lea }, base_reg, .{
.base = .{ .reg = base_reg },
.mod = .{ .rm = .{ .index = rhs_reg } },
});
} else try cg.asmRegisterMemory(.{ ._, .lea }, base_reg, .{
.base = .{ .reg = base_reg },
.mod = .{ .rm = .{
.index = rhs_reg,
.scale = .fromFactor(@intCast(elem_size)),
}),
);
try ops[0].store(&ops[1], .{}, cg);
} },
});
try ops[0].store(&ops[2], .{}, cg);
},
else => |e| return e,
};
@ -165315,9 +165623,7 @@ fn airShlShrBinOp(self: *CodeGen, inst: Air.Inst.Index) !void {
.ty = mask_ty.toIntern(),
.storage = .{ .elems = &([1]InternPool.Index{
(try rhs_ty.childType(zcu).maxIntScalar(pt, .u8)).toIntern(),
} ++ [1]InternPool.Index{
(try pt.intValue(.u8, 0)).toIntern(),
} ** 15) },
} ++ [1]InternPool.Index{.zero_u8} ** 15) },
} })));
const mask_addr_reg = try self.copyToTmpRegister(.usize, mask_mcv.address());
const mask_addr_lock = self.register_manager.lockRegAssumeUnused(mask_addr_reg);
@ -168138,7 +168444,7 @@ fn load(self: *CodeGen, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerE
.register_quadruple,
.register_overflow,
.register_mask,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
=> unreachable, // not a valid pointer
.immediate,
@ -168356,7 +168662,7 @@ fn store(
.register_quadruple,
.register_overflow,
.register_mask,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
=> unreachable, // not a valid pointer
.immediate,
@ -168842,7 +169148,7 @@ fn genUnOpMir(self: *CodeGen, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv:
.lea_direct,
.lea_got,
.lea_frame,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable, // unmodifiable destination
@ -170513,7 +170819,7 @@ fn genBinOp(
.load_got,
.lea_got,
.lea_frame,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable,
@ -171696,7 +172002,7 @@ fn genBinOpMir(
.lea_got,
.lea_frame,
.lea_symbol,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable, // unmodifiable destination
@ -171732,7 +172038,7 @@ fn genBinOpMir(
.undef,
.register_overflow,
.register_mask,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
=> unreachable,
.register,
@ -171892,7 +172198,7 @@ fn genBinOpMir(
.undef,
.register_overflow,
.register_mask,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable,
@ -171988,7 +172294,7 @@ fn genBinOpMir(
.undef,
.register_overflow,
.register_mask,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable,
@ -172119,7 +172425,7 @@ fn genIntMulComplexOpMir(self: *CodeGen, dst_ty: Type, dst_mcv: MCValue, src_mcv
.lea_direct,
.lea_got,
.lea_frame,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable, // unmodifiable destination
@ -172151,7 +172457,7 @@ fn genIntMulComplexOpMir(self: *CodeGen, dst_ty: Type, dst_mcv: MCValue, src_mcv
.register_quadruple,
.register_overflow,
.register_mask,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable,
@ -172271,7 +172577,7 @@ fn airArg(self: *CodeGen, inst: Air.Inst.Index) !void {
try self.genCopy(arg_ty, dst_mcv, src_mcv, .{});
break :result dst_mcv;
},
.elementwise_regs_then_frame => |regs_frame_addr| {
.elementwise_args => |regs_frame_addr| {
try self.spillEflagsIfOccupied();
const fn_info = zcu.typeToFunc(self.fn_type).?;
@ -172375,7 +172681,7 @@ fn genLocalDebugInfo(
.arg, .dbg_arg_inline, .dbg_var_val => |tag| {
switch (mcv) {
.none => try self.asmAir(.dbg_local, inst),
.unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable,
.unreach, .dead, .elementwise_args, .reserved_frame, .air_ref => unreachable,
.immediate => |imm| try self.asmAirImmediate(.dbg_local, inst, .u(imm)),
.lea_frame => |frame_addr| try self.asmAirFrameAddress(.dbg_local, inst, frame_addr),
.lea_symbol => |sym_off| try self.asmAirImmediate(.dbg_local, inst, .rel(sym_off)),
@ -172398,7 +172704,7 @@ fn genLocalDebugInfo(
},
.dbg_var_ptr => switch (mcv) {
else => unreachable,
.unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable,
.unreach, .dead, .elementwise_args, .reserved_frame, .air_ref => unreachable,
.lea_frame => |frame_addr| try self.asmAirMemory(.dbg_local, inst, .{
.base = .{ .frame = frame_addr.index },
.mod = .{ .rm = .{
@ -172567,7 +172873,7 @@ fn genCall(self: *CodeGen, info: union(enum) {
try self.genCopy(arg_ty, dst_arg, src_arg, opts);
try self.freeValue(src_arg);
},
.elementwise_regs_then_frame => |regs_frame_addr| {
.elementwise_args => |regs_frame_addr| {
const index_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const index_lock = self.register_manager.lockRegAssumeUnused(index_reg);
defer self.register_manager.unlockReg(index_lock);
@ -172676,7 +172982,7 @@ fn genCall(self: *CodeGen, info: union(enum) {
.indirect => |reg_off| try self.genSetReg(reg_off.reg, .usize, .{
.lea_frame = .{ .index = frame_index, .off = -reg_off.off },
}, .{}),
.elementwise_regs_then_frame => |regs_frame_addr| {
.elementwise_args => |regs_frame_addr| {
const src_mem: Memory = if (src_arg.isBase()) try src_arg.mem(self, .{ .size = .dword }) else .{
.base = .{ .reg = try self.copyToTmpRegister(
.usize,
@ -173064,7 +173370,7 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v
.lea_got,
.lea_frame,
.lea_symbol,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable,
@ -173119,7 +173425,7 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v
.lea_direct,
.lea_got,
.lea_frame,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable,
@ -173524,7 +173830,7 @@ fn isNull(self: *CodeGen, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue)
.lea_direct,
.lea_got,
.lea_symbol,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable,
@ -173868,17 +174174,23 @@ fn lowerBlock(self: *CodeGen, inst: Air.Inst.Index, body: []const Air.Inst.Index
var block_data = self.blocks.fetchRemove(inst).?;
defer block_data.value.deinit(self.gpa);
if (block_data.value.relocs.items.len > 0) {
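// Same trick as the function epilogue: drop relocs that point at the
// trailing jumps to this block's end and dead-code those jumps, since
// falling through reaches the same place.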
var last_inst: Mir.Inst.Index = @intCast(self.mir_instructions.len - 1);
while (block_data.value.relocs.getLastOrNull() == last_inst) {
block_data.value.relocs.items.len -= 1;
self.mir_instructions.set(last_inst, .{
.tag = .pseudo,
.ops = .pseudo_dead_none,
.data = undefined,
});
last_inst -= 1;
}
for (block_data.value.relocs.items) |block_reloc| self.performReloc(block_reloc);
try self.restoreState(block_data.value.state, liveness.deaths, .{
.emit_instructions = false,
.update_tracking = true,
.resurrect = true,
.close_scope = true,
});
const block_relocs_last_index = block_data.value.relocs.items.len - 1;
for (if (block_data.value.relocs.items[block_relocs_last_index] == self.mir_instructions.len - 1) block_relocs: {
_ = self.mir_instructions.pop();
break :block_relocs block_data.value.relocs.items[0..block_relocs_last_index];
} else block_data.value.relocs.items) |block_reloc| self.performReloc(block_reloc);
}
if (std.debug.runtime_safety) assert(self.inst_tracking.getIndex(inst).? == inst_tracking_i);
@ -174453,18 +174765,6 @@ fn airBr(self: *CodeGen, inst: Air.Inst.Index) !void {
try self.freeValue(block_tracking.short);
}
fn airRepeat(self: *CodeGen, inst: Air.Inst.Index) !void {
const loop_inst = self.air.instructions.items(.data)[@intFromEnum(inst)].repeat.loop_inst;
const repeat_info = self.loops.get(loop_inst).?;
try self.restoreState(repeat_info.state, &.{}, .{
.emit_instructions = true,
.update_tracking = false,
.resurrect = false,
.close_scope = true,
});
_ = try self.asmJmpReloc(repeat_info.target);
}
fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
@setEvalBranchQuota(1_100);
const pt = self.pt;
@ -175587,7 +175887,7 @@ fn genCopy(self: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: C
.lea_got,
.lea_frame,
.lea_symbol,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable, // unmodifiable destination
@ -175598,7 +175898,7 @@ fn genCopy(self: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: C
.dead,
.undef,
.register_overflow,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
=> unreachable,
.immediate,
@ -175776,7 +176076,7 @@ fn genSetReg(
.none,
.unreach,
.dead,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
=> unreachable,
.undef => if (opts.safety) switch (dst_reg.class()) {
@ -176313,7 +176613,7 @@ fn genSetMem(
.none,
.unreach,
.dead,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
=> unreachable,
.undef => if (opts.safety) try self.genInlineMemset(
@ -178566,10 +178866,10 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
const mask_ty = try pt.vectorType(.{ .len = vec_len, .child = mask_elem_ty.toIntern() });
var mask_elems_buf: [32]InternPool.Index = undefined;
const mask_elems = mask_elems_buf[0..vec_len];
for (mask_elems, 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{
.ty = mask_elem_ty.toIntern(),
.storage = .{ .u64 = @as(u64, 1) << @intCast(bit) },
} });
for (mask_elems, 0..) |*elem, bit| elem.* = (try pt.intValue(
mask_elem_ty,
@as(u8, 1) << @truncate(bit),
)).toIntern();
const mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems },
@ -179437,16 +179737,13 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
var lhs_mask_elems: [16]InternPool.Index = undefined;
for (lhs_mask_elems[0..max_abi_size], 0..) |*lhs_mask_elem, byte_index| {
const elem_index = byte_index / elem_abi_size;
lhs_mask_elem.* = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
lhs_mask_elem.* = (try pt.intValue(.u8, if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
if (mask_elem < 0) break :elem 0b1_00_00000;
const mask_elem_index: u31 = @intCast(mask_elem);
const byte_off: u32 = @intCast(byte_index % elem_abi_size);
break :elem @intCast(mask_elem_index * elem_abi_size + byte_off);
} },
} });
break :elem mask_elem_index * elem_abi_size + byte_off;
})).toIntern();
}
const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const lhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
@ -179471,16 +179768,13 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
var rhs_mask_elems: [16]InternPool.Index = undefined;
for (rhs_mask_elems[0..max_abi_size], 0..) |*rhs_mask_elem, byte_index| {
const elem_index = byte_index / elem_abi_size;
rhs_mask_elem.* = try pt.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
rhs_mask_elem.* = (try pt.intValue(.u8, if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
if (mask_elem >= 0) break :elem 0b1_00_00000;
const mask_elem_index: u31 = @intCast(~mask_elem);
const byte_off: u32 = @intCast(byte_index % elem_abi_size);
break :elem @intCast(mask_elem_index * elem_abi_size + byte_off);
} },
} });
break :elem mask_elem_index * elem_abi_size + byte_off;
})).toIntern();
}
const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const rhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
@ -180611,7 +180905,7 @@ fn resolveCallingConventionValues(
result.stack_byte_count =
std.mem.alignForward(u31, result.stack_byte_count, frame_elem_align);
arg_mcv[arg_mcv_i] = .{ .elementwise_regs_then_frame = .{
arg_mcv[arg_mcv_i] = .{ .elementwise_args = .{
.regs = remaining_param_int_regs,
.frame_off = @intCast(result.stack_byte_count),
.frame_index = stack_frame_base,
@ -181236,7 +181530,7 @@ const Temp = struct {
.load_got,
.lea_got,
.lea_frame,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> false,
@ -181671,7 +181965,7 @@ const Temp = struct {
.register_quadruple,
.register_overflow,
.register_mask,
.elementwise_regs_then_frame,
.elementwise_args,
.reserved_frame,
.air_ref,
=> unreachable, // not a valid pointer
@ -186395,10 +186689,36 @@ const Temp = struct {
if (cg.reused_operands.isSet(op_index)) continue;
try cg.processDeath(op_ref.toIndexAllowNone() orelse continue);
}
if (cg.liveness.isUnused(inst)) try temp.die(cg) else switch (temp.unwrap(cg)) {
if (cg.liveness.isUnused(inst)) try temp.die(cg) else {
switch (temp.unwrap(cg)) {
.ref, .err_ret_trace => {
const temp_mcv = temp.tracking(cg).short;
const result = result: switch (temp_mcv) {
.none, .unreach, .dead, .elementwise_args, .reserved_frame, .air_ref => unreachable,
.undef, .immediate, .lea_frame => temp_mcv,
.eflags,
.register,
.register_pair,
.register_triple,
.register_quadruple,
.register_offset,
.register_overflow,
.register_mask,
.memory,
.load_symbol,
.lea_symbol,
.indirect,
.load_direct,
.lea_direct,
.load_got,
.lea_got,
.load_frame,
=> {
const result = try cg.allocRegOrMem(inst, true);
try cg.genCopy(cg.typeOfIndex(inst), result, temp.tracking(cg).short, .{});
try cg.genCopy(cg.typeOfIndex(inst), result, temp_mcv, .{});
break :result result;
},
};
tracking_log.debug("{} => {} (birth)", .{ inst, result });
cg.inst_tracking.putAssumeCapacityNoClobber(inst, .init(result));
},
@ -186409,6 +186729,7 @@ const Temp = struct {
assert(cg.reuseTemp(inst, temp_index.toIndex(), temp_tracking));
},
}
}
for (0.., op_refs, op_temps) |op_index, op_ref, op_temp| {
if (op_temp.index != temp.index) continue;
if (tomb_bits & @as(Air.Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
@ -187950,6 +188271,7 @@ const Select = struct {
ptr_bit_size,
size,
src0_size,
dst0_size,
delta_size,
delta_elem_size,
unaligned_size,
@ -187993,6 +188315,7 @@ const Select = struct {
const sub_src0_size: Adjust = .{ .sign = .neg, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
const add_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
const add_8_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"8" };
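// Like `add_src0_size`, but measured from the destination operand's ABI size.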
const add_dst0_size: Adjust = .{ .sign = .pos, .lhs = .dst0_size, .op = .mul, .rhs = .@"1" };
const add_delta_size_div_8: Adjust = .{ .sign = .pos, .lhs = .delta_size, .op = .div, .rhs = .@"8" };
const add_delta_elem_size: Adjust = .{ .sign = .pos, .lhs = .delta_elem_size, .op = .mul, .rhs = .@"1" };
const add_delta_elem_size_div_8: Adjust = .{ .sign = .pos, .lhs = .delta_elem_size, .op = .div, .rhs = .@"8" };
@ -188788,6 +189111,7 @@ const Select = struct {
.ptr_bit_size => s.cg.target.ptrBitWidth(),
.size => @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu)),
.src0_size => @intCast(Select.Operand.Ref.src0.typeOf(s).abiSize(s.cg.pt.zcu)),
.dst0_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).abiSize(s.cg.pt.zcu)),
.delta_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu))) -
@as(SignedImm, @intCast(op.flags.index.ref.typeOf(s).abiSize(s.cg.pt.zcu)))),
.delta_elem_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))) -

View File

@ -27,13 +27,27 @@ pub const CodeGenError = GenerateSymbolError || error{
CodegenFail,
};
fn devFeatureForBackend(comptime backend: std.builtin.CompilerBackend) dev.Feature {
comptime assert(mem.startsWith(u8, @tagName(backend), "stage2_"));
return @field(dev.Feature, @tagName(backend)["stage2_".len..] ++ "_backend");
fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature {
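// Explicit mapping instead of the old comptime tag-name splice, so a
// runtime `backend` value works too.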
return switch (backend) {
.other, .stage1 => unreachable,
.stage2_aarch64 => .aarch64_backend,
.stage2_arm => .arm_backend,
.stage2_c => .c_backend,
.stage2_llvm => .llvm_backend,
.stage2_powerpc => .powerpc_backend,
.stage2_riscv64 => .riscv64_backend,
.stage2_sparc64 => .sparc64_backend,
.stage2_spirv64 => .spirv64_backend,
.stage2_wasm => .wasm_backend,
.stage2_x86 => .x86_backend,
.stage2_x86_64 => .x86_64_backend,
_ => unreachable,
};
}
pub fn importBackend(comptime backend: std.builtin.CompilerBackend) ?type {
fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
return switch (backend) {
.other, .stage1 => unreachable,
.stage2_aarch64 => @import("arch/aarch64/CodeGen.zig"),
.stage2_arm => @import("arch/arm/CodeGen.zig"),
.stage2_c => @import("codegen/c.zig"),
@ -42,11 +56,35 @@ pub fn importBackend(comptime backend: std.builtin.CompilerBackend) ?type {
.stage2_riscv64 => @import("arch/riscv64/CodeGen.zig"),
.stage2_sparc64 => @import("arch/sparc64/CodeGen.zig"),
.stage2_spirv64 => @import("codegen/spirv.zig"),
.stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"),
else => null,
.stage2_wasm => @import("arch/wasm/CodeGen.zig"),
.stage2_x86, .stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"),
_ => unreachable,
};
}
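/// Returns the Legalize feature set requested by the backend that will
/// compile `nav_index`, or `null` if the backend consumes AIR unmodified.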
pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*const Air.Legalize.Features {
const zcu = pt.zcu;
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
switch (target_util.zigBackend(target.*, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_llvm,
.stage2_c,
.stage2_wasm,
.stage2_arm,
.stage2_x86_64,
.stage2_aarch64,
.stage2_x86,
.stage2_riscv64,
.stage2_sparc64,
.stage2_spirv64,
.stage2_powerpc,
=> |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).legalizeFeatures(target);
},
}
}
pub fn generateFunction(
lf: *link.File,
pt: Zcu.PerThread,
@ -60,7 +98,7 @@ pub fn generateFunction(
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_aarch64,
.stage2_arm,
@ -70,7 +108,7 @@ pub fn generateFunction(
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).?.generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output);
return importBackend(backend).generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output);
},
}
}
@ -88,14 +126,14 @@ pub fn generateLazyFunction(
zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
else
zcu.getTarget();
switch (target_util.zigBackend(target, false)) {
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_powerpc,
.stage2_riscv64,
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).?.generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
},
}
}

View File

@ -4,6 +4,7 @@ const assert = std.debug.assert;
const mem = std.mem;
const log = std.log.scoped(.c);
const dev = @import("../dev.zig");
const link = @import("../link.zig");
const Zcu = @import("../Zcu.zig");
const Module = @import("../Package/Module.zig");
@ -20,6 +21,15 @@ const Alignment = InternPool.Alignment;
const BigIntLimb = std.math.big.Limb;
const BigInt = std.math.big.int;
pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
return if (dev.env.supports(.legalize)) comptime &.initMany(&.{
.expand_intcast_safe,
.expand_add_safe,
.expand_sub_safe,
.expand_mul_safe,
}) else null; // we don't currently ask zig1 to use safe optimization modes
}
pub const CType = @import("c/Type.zig");
pub const CValue = union(enum) {
@ -206,7 +216,6 @@ const reserved_idents = std.StaticStringMap(void).initComptime(.{
.{ "atomic_ushort", {} },
.{ "atomic_wchar_t", {} },
.{ "auto", {} },
.{ "bool", {} },
.{ "break", {} },
.{ "case", {} },
.{ "char", {} },
@ -266,6 +275,11 @@ const reserved_idents = std.StaticStringMap(void).initComptime(.{
.{ "va_end", {} },
.{ "va_copy", {} },
// stdbool.h
.{ "bool", {} },
.{ "false", {} },
.{ "true", {} },
// stddef.h
.{ "offsetof", {} },
@ -1591,7 +1605,7 @@ pub const DeclGen = struct {
try writer.writeAll("((");
try dg.renderCType(writer, ctype);
return writer.print("){x})", .{
try dg.fmtIntLiteral(try pt.undefValue(.usize), .Other),
try dg.fmtIntLiteral(.undef_usize, .Other),
});
},
.slice => {
@ -1605,7 +1619,7 @@ pub const DeclGen = struct {
const ptr_ty = ty.slicePtrFieldType(zcu);
try dg.renderType(writer, ptr_ty);
return writer.print("){x}, {0x}}}", .{
try dg.fmtIntLiteral(try dg.pt.undefValue(.usize), .Other),
try dg.fmtIntLiteral(.undef_usize, .Other),
});
},
},
@ -3360,7 +3374,8 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.error_name => try airErrorName(f, inst),
.splat => try airSplat(f, inst),
.select => try airSelect(f, inst),
.shuffle => try airShuffle(f, inst),
.shuffle_one => try airShuffleOne(f, inst),
.shuffle_two => try airShuffleTwo(f, inst),
.reduce => try airReduce(f, inst),
.aggregate_init => try airAggregateInit(f, inst),
.union_init => try airUnionInit(f, inst),
@ -4179,7 +4194,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
try v.elem(f, w);
try w.writeAll(", ");
try f.writeCValue(w, rhs, .FunctionArgument);
try v.elem(f, w);
if (f.typeOf(bin_op.rhs).isVector(zcu)) try v.elem(f, w);
try f.object.dg.renderBuiltinInfo(w, scalar_ty, info);
try w.writeAll(");\n");
try v.end(f, inst, w);
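// Illustrative note (not part of this diff): with a vector lhs and a scalar
// rhs, `v.elem` is now applied only to the lhs, so the scalar rhs is passed
// to the builtin unchanged on every loop iteration instead of being
// indexed as if it were a vector.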
@ -6376,7 +6391,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
if (operand_child_ctype.info(ctype_pool) == .array) {
try writer.writeByte('&');
try f.writeCValueDeref(writer, operand);
try writer.print("[{}]", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))});
try writer.print("[{}]", .{try f.fmtIntLiteral(.zero_usize)});
} else try f.writeCValue(writer, operand, .Other);
}
try a.end(f, writer);
@ -6536,7 +6551,7 @@ fn airBinBuiltinCall(
try v.elem(f, writer);
try writer.writeAll(", ");
try f.writeCValue(writer, rhs, .FunctionArgument);
try v.elem(f, writer);
if (f.typeOf(bin_op.rhs).isVector(zcu)) try v.elem(f, writer);
try f.object.dg.renderBuiltinInfo(writer, scalar_ty, info);
try writer.writeAll(");\n");
try v.end(f, inst, writer);
@ -6907,7 +6922,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll("for (");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" = ");
try f.object.dg.renderValue(writer, try pt.intValue(.usize, 0), .Other);
try f.object.dg.renderValue(writer, .zero_usize, .Other);
try writer.writeAll("; ");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" != ");
@ -7149,34 +7164,73 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
fn airShuffleOne(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
const mask = Value.fromInterned(extra.mask);
const lhs = try f.resolveInst(extra.a);
const rhs = try f.resolveInst(extra.b);
const inst_ty = f.typeOfIndex(inst);
const unwrapped = f.air.unwrapShuffleOne(zcu, inst);
const mask = unwrapped.mask;
const operand = try f.resolveInst(unwrapped.operand);
const inst_ty = unwrapped.result_ty;
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands
for (0..extra.mask_len) |index| {
try reap(f, inst, &.{unwrapped.operand}); // local cannot alias operand
for (mask, 0..) |mask_elem, out_idx| {
try f.writeCValue(writer, local, .Other);
try writer.writeByte('[');
try f.object.dg.renderValue(writer, try pt.intValue(.usize, index), .Other);
try f.object.dg.renderValue(writer, try pt.intValue(.usize, out_idx), .Other);
try writer.writeAll("] = ");
const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu);
const src_val = try pt.intValue(.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
switch (mask_elem.unwrap()) {
.elem => |src_idx| {
try f.writeCValue(writer, operand, .Other);
try writer.writeByte('[');
try f.object.dg.renderValue(writer, src_val, .Other);
try writer.writeAll("];\n");
try f.object.dg.renderValue(writer, try pt.intValue(.usize, src_idx), .Other);
try writer.writeByte(']');
},
.value => |val| try f.object.dg.renderValue(writer, .fromInterned(val), .Other),
}
try writer.writeAll(";\n");
}
return local;
}
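// Illustrative sketch (not part of this diff): for a mask like
// [elem 1, val 7], this emits roughly:
//   t0[0] = a1[1];
//   t0[1] = 7;
// one assignment per result element, with comptime-known elements
// rendered directly as constants.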
fn airShuffleTwo(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
const zcu = pt.zcu;
const unwrapped = f.air.unwrapShuffleTwo(zcu, inst);
const mask = unwrapped.mask;
const operand_a = try f.resolveInst(unwrapped.operand_a);
const operand_b = try f.resolveInst(unwrapped.operand_b);
const inst_ty = unwrapped.result_ty;
const elem_ty = inst_ty.childType(zcu);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
try reap(f, inst, &.{ unwrapped.operand_a, unwrapped.operand_b }); // local cannot alias operands
for (mask, 0..) |mask_elem, out_idx| {
try f.writeCValue(writer, local, .Other);
try writer.writeByte('[');
try f.object.dg.renderValue(writer, try pt.intValue(.usize, out_idx), .Other);
try writer.writeAll("] = ");
switch (mask_elem.unwrap()) {
.a_elem => |src_idx| {
try f.writeCValue(writer, operand_a, .Other);
try writer.writeByte('[');
try f.object.dg.renderValue(writer, try pt.intValue(.usize, src_idx), .Other);
try writer.writeByte(']');
},
.b_elem => |src_idx| {
try f.writeCValue(writer, operand_b, .Other);
try writer.writeByte('[');
try f.object.dg.renderValue(writer, try pt.intValue(.usize, src_idx), .Other);
try writer.writeByte(']');
},
.undef => try f.object.dg.renderUndefValue(writer, elem_ty, .Other),
}
try writer.writeAll(";\n");
}
return local;
@ -8311,11 +8365,11 @@ const Vectorize = struct {
try writer.writeAll("for (");
try f.writeCValue(writer, local, .Other);
try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))});
try writer.print(" = {d}; ", .{try f.fmtIntLiteral(.zero_usize)});
try f.writeCValue(writer, local, .Other);
try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try pt.intValue(.usize, ty.vectorLen(zcu)))});
try f.writeCValue(writer, local, .Other);
try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try pt.intValue(.usize, 1))});
try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(.one_usize)});
f.object.indent_writer.pushIndent();
break :index .{ .index = local };

View File

@ -1408,6 +1408,15 @@ pub const Pool = struct {
.bits = pt.zcu.errorSetBits(),
}, mod, kind),
.ptr_usize_type,
=> return pool.getPointer(allocator, .{
.elem_ctype = .usize,
}),
.ptr_const_comptime_int_type,
=> return pool.getPointer(allocator, .{
.elem_ctype = .void,
.@"const" = true,
}),
.manyptr_u8_type,
=> return pool.getPointer(allocator, .{
.elem_ctype = .u8,
@ -1418,11 +1427,6 @@ pub const Pool = struct {
.elem_ctype = .u8,
.@"const" = true,
}),
.single_const_pointer_to_comptime_int_type,
=> return pool.getPointer(allocator, .{
.elem_ctype = .void,
.@"const" = true,
}),
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
=> {
@ -2157,11 +2161,16 @@ pub const Pool = struct {
},
.undef,
.undef_bool,
.undef_usize,
.undef_u1,
.zero,
.zero_usize,
.zero_u1,
.zero_u8,
.one,
.one_usize,
.one_u1,
.one_u8,
.four_u8,
.negative_one,
@ -2172,7 +2181,7 @@ pub const Pool = struct {
.bool_false,
.empty_tuple,
.none,
=> unreachable,
=> unreachable, // values, not types
_ => |ip_index| switch (ip.indexToKey(ip_index)) {
.int_type => |int_info| return pool.fromIntInfo(allocator, int_info, mod, kind),

View File

@ -36,6 +36,10 @@ const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev;
const Error = error{ OutOfMemory, CodegenFail };
pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
return null;
}
fn subArchName(features: std.Target.Cpu.Feature.Set, arch: anytype, mappings: anytype) ?[]const u8 {
inline for (mappings) |mapping| {
if (arch.featureSetHas(features, mapping[0])) return mapping[1];
@ -3081,10 +3085,11 @@ pub const Object = struct {
.undefined_type,
.enum_literal_type,
=> unreachable,
.ptr_usize_type,
.ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
=> .ptr,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
@ -3098,11 +3103,16 @@ pub const Object = struct {
=> unreachable,
// values, not types
.undef,
.undef_bool,
.undef_usize,
.undef_u1,
.zero,
.zero_usize,
.zero_u1,
.zero_u8,
.one,
.one_usize,
.one_u1,
.one_u8,
.four_u8,
.negative_one,
@ -4959,7 +4969,8 @@ pub const FuncGen = struct {
.error_name => try self.airErrorName(inst),
.splat => try self.airSplat(inst),
.select => try self.airSelect(inst),
.shuffle => try self.airShuffle(inst),
.shuffle_one => try self.airShuffleOne(inst),
.shuffle_two => try self.airShuffleTwo(inst),
.aggregate_init => try self.airAggregateInit(inst),
.union_init => try self.airUnionInit(inst),
.prefetch => try self.airPrefetch(inst),
@ -8917,6 +8928,8 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(extra.rhs);
const lhs_ty = self.typeOf(extra.lhs);
if (lhs_ty.isVector(zcu) and !self.typeOf(extra.rhs).isVector(zcu))
return self.ng.todo("implement vector shifts with scalar rhs", .{});
const lhs_scalar_ty = lhs_ty.scalarType(zcu);
const dest_ty = self.typeOfIndex(inst);
@ -8986,6 +8999,8 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
if (lhs_ty.isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu))
return self.ng.todo("implement vector shifts with scalar rhs", .{});
const lhs_scalar_ty = lhs_ty.scalarType(zcu);
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
@ -8997,14 +9012,17 @@ pub const FuncGen = struct {
fn airShl(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_type = self.typeOf(bin_op.lhs);
const lhs_ty = self.typeOf(bin_op.lhs);
if (lhs_ty.isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu))
return self.ng.todo("implement vector shifts with scalar rhs", .{});
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_type), "");
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
return self.wip.bin(.shl, lhs, casted_rhs, "");
}
@ -9023,6 +9041,8 @@ pub const FuncGen = struct {
const llvm_lhs_scalar_ty = llvm_lhs_ty.scalarType(&o.builder);
const rhs_ty = self.typeOf(bin_op.rhs);
if (lhs_ty.isVector(zcu) and !rhs_ty.isVector(zcu))
return self.ng.todo("implement vector shifts with scalar rhs", .{});
const rhs_info = rhs_ty.intInfo(zcu);
assert(rhs_info.signedness == .unsigned);
const llvm_rhs_ty = try o.lowerType(rhs_ty);
@ -9095,6 +9115,8 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
if (lhs_ty.isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu))
return self.ng.todo("implement vector shifts with scalar rhs", .{});
const lhs_scalar_ty = lhs_ty.scalarType(zcu);
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
@ -9167,11 +9189,7 @@ pub const FuncGen = struct {
const is_vector = operand_ty.zigTypeTag(zcu) == .vector;
assert(is_vector == (dest_ty.zigTypeTag(zcu) == .vector));
const min_panic_id: Zcu.SimplePanicId, const max_panic_id: Zcu.SimplePanicId = id: {
if (dest_is_enum) break :id .{ .invalid_enum_value, .invalid_enum_value };
if (dest_info.signedness == .unsigned) break :id .{ .negative_to_unsigned, .cast_truncated_data };
break :id .{ .cast_truncated_data, .cast_truncated_data };
};
const panic_id: Zcu.SimplePanicId = if (dest_is_enum) .invalid_enum_value else .integer_out_of_bounds;
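// Illustrative note (not part of this diff): both the min and max range
// checks now raise the same `.integer_out_of_bounds` panic, so a failing
// `@intCast` no longer distinguishes the negative-to-unsigned case from
// the truncated-data case.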
if (have_min_check) {
const min_const_scalar = try minIntConst(&o.builder, dest_scalar, operand_scalar_llvm_ty, zcu);
@ -9185,7 +9203,7 @@ pub const FuncGen = struct {
const ok_block = try fg.wip.block(1, "IntMinOk");
_ = try fg.wip.brCond(ok, ok_block, fail_block, .none);
fg.wip.cursor = .{ .block = fail_block };
try fg.buildSimplePanic(min_panic_id);
try fg.buildSimplePanic(panic_id);
fg.wip.cursor = .{ .block = ok_block };
}
@ -9201,7 +9219,7 @@ pub const FuncGen = struct {
const ok_block = try fg.wip.block(1, "IntMaxOk");
_ = try fg.wip.brCond(ok, ok_block, fail_block, .none);
fg.wip.cursor = .{ .block = fail_block };
try fg.buildSimplePanic(max_panic_id);
try fg.buildSimplePanic(panic_id);
fg.wip.cursor = .{ .block = ok_block };
}
}
@ -9249,8 +9267,6 @@ pub const FuncGen = struct {
const operand_ty = self.typeOf(ty_op.operand);
const dest_ty = self.typeOfIndex(inst);
const target = zcu.getTarget();
const dest_bits = dest_ty.floatBits(target);
const src_bits = operand_ty.floatBits(target);
if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) {
return self.wip.cast(.fptrunc, operand, try o.lowerType(dest_ty), "");
@ -9258,6 +9274,8 @@ pub const FuncGen = struct {
const operand_llvm_ty = try o.lowerType(operand_ty);
const dest_llvm_ty = try o.lowerType(dest_ty);
const dest_bits = dest_ty.floatBits(target);
const src_bits = operand_ty.floatBits(target);
const fn_name = try o.builder.strtabStringFmt("__trunc{s}f{s}f2", .{
compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits),
});
@ -9342,11 +9360,12 @@ pub const FuncGen = struct {
return self.wip.conv(.unsigned, operand, llvm_dest_ty, "");
}
if (operand_ty.zigTypeTag(zcu) == .int and inst_ty.isPtrAtRuntime(zcu)) {
const operand_scalar_ty = operand_ty.scalarType(zcu);
const inst_scalar_ty = inst_ty.scalarType(zcu);
if (operand_scalar_ty.zigTypeTag(zcu) == .int and inst_scalar_ty.isPtrAtRuntime(zcu)) {
return self.wip.cast(.inttoptr, operand, llvm_dest_ty, "");
}
if (operand_ty.isPtrAtRuntime(zcu) and inst_ty.zigTypeTag(zcu) == .int) {
if (operand_scalar_ty.isPtrAtRuntime(zcu) and inst_scalar_ty.zigTypeTag(zcu) == .int) {
return self.wip.cast(.ptrtoint, operand, llvm_dest_ty, "");
}
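// Illustrative note (not part of this diff): comparing the scalar types
// lets vector casts take these paths too, e.g. a
// @Vector(2, usize) -> @Vector(2, *u8) cast lowering to `inttoptr`.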
@ -9644,7 +9663,7 @@ pub const FuncGen = struct {
const zcu = o.pt.zcu;
const ip = &zcu.intern_pool;
for (body_tail[1..]) |body_inst| {
switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) {
switch (fg.liveness.categorizeOperand(fg.air, zcu, body_inst, body_tail[0], ip)) {
.none => continue,
.write, .noret, .complex => return false,
.tomb => return true,
@ -10399,42 +10418,192 @@ pub const FuncGen = struct {
return self.wip.select(.normal, pred, a, b, "");
}
fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
fn airShuffleOne(fg: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = fg.ng.object;
const pt = o.pt;
const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolveInst(extra.a);
const b = try self.resolveInst(extra.b);
const mask = Value.fromInterned(extra.mask);
const mask_len = extra.mask_len;
const a_len = self.typeOf(extra.a).vectorLen(zcu);
const gpa = zcu.gpa;
// LLVM uses integers larger than the length of the first array to
// index into the second array. This was deemed unnecessarily fragile
// when changing code, so Zig uses negative numbers to index the
// second vector. These start at -1 and go down, and are easiest to use
// with the ~ operator. Here we convert between the two formats.
const values = try self.gpa.alloc(Builder.Constant, mask_len);
defer self.gpa.free(values);
const unwrapped = fg.air.unwrapShuffleOne(zcu, inst);
for (values, 0..) |*val, i| {
const elem = try mask.elemValue(pt, i);
if (elem.isUndef(zcu)) {
val.* = try o.builder.undefConst(.i32);
} else {
const int = elem.toSignedInt(zcu);
const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len);
val.* = try o.builder.intConst(.i32, unsigned);
}
const operand = try fg.resolveInst(unwrapped.operand);
const mask = unwrapped.mask;
const operand_ty = fg.typeOf(unwrapped.operand);
const llvm_operand_ty = try o.lowerType(operand_ty);
const llvm_result_ty = try o.lowerType(unwrapped.result_ty);
const llvm_elem_ty = try o.lowerType(unwrapped.result_ty.childType(zcu));
const llvm_poison_elem = try o.builder.poisonConst(llvm_elem_ty);
const llvm_poison_mask_elem = try o.builder.poisonConst(.i32);
const llvm_mask_ty = try o.builder.vectorType(.normal, @intCast(mask.len), .i32);
// LLVM requires that the two input vectors have the same length, so lowering isn't trivial.
// And, in the words of jacobly0: "llvm sucks at shuffles so we do have to hold its hand at
// least a bit". So, there are two cases here.
//
// If the operand length equals the mask length, we do just the one `shufflevector`, where
// the second operand is a constant vector with comptime-known elements at the right indices
// and poison values elsewhere (in the indices which won't be selected).
//
// Otherwise, we lower to *two* `shufflevector` instructions. The first shuffles the runtime
// operand with an all-poison vector to extract and correctly position all of the runtime
// elements. We also make a constant vector with all of the comptime elements correctly
// positioned. Then, our second instruction selects elements from those "runtime-or-poison"
// and "comptime-or-poison" vectors to compute the result.
// This buffer is used primarily for the mask constants.
const llvm_elem_buf = try gpa.alloc(Builder.Constant, mask.len);
defer gpa.free(llvm_elem_buf);
// ...but first, we'll collect all of the comptime-known values.
var any_defined_comptime_value = false;
for (mask, llvm_elem_buf) |mask_elem, *llvm_elem| {
llvm_elem.* = switch (mask_elem.unwrap()) {
.elem => llvm_poison_elem,
.value => |val| if (!Value.fromInterned(val).isUndef(zcu)) elem: {
any_defined_comptime_value = true;
break :elem try o.lowerValue(val);
} else llvm_poison_elem,
};
}
// This vector is like the result, but runtime elements are replaced with poison.
const comptime_and_poison: Builder.Value = if (any_defined_comptime_value) vec: {
break :vec try o.builder.vectorValue(llvm_result_ty, llvm_elem_buf);
} else try o.builder.poisonValue(llvm_result_ty);
const llvm_mask_value = try o.builder.vectorValue(
try o.builder.vectorType(.normal, mask_len, .i32),
values,
if (operand_ty.vectorLen(zcu) == mask.len) {
// input length equals mask/output length, so we lower to one instruction
for (mask, llvm_elem_buf, 0..) |mask_elem, *llvm_elem, elem_idx| {
llvm_elem.* = switch (mask_elem.unwrap()) {
.elem => |idx| try o.builder.intConst(.i32, idx),
.value => |val| if (!Value.fromInterned(val).isUndef(zcu)) mask_val: {
break :mask_val try o.builder.intConst(.i32, mask.len + elem_idx);
} else llvm_poison_mask_elem,
};
}
return fg.wip.shuffleVector(
operand,
comptime_and_poison,
try o.builder.vectorValue(llvm_mask_ty, llvm_elem_buf),
"",
);
}
for (mask, llvm_elem_buf) |mask_elem, *llvm_elem| {
llvm_elem.* = switch (mask_elem.unwrap()) {
.elem => |idx| try o.builder.intConst(.i32, idx),
.value => llvm_poison_mask_elem,
};
}
// This vector is like our result, but all comptime-known elements are poison.
const runtime_and_poison = try fg.wip.shuffleVector(
operand,
try o.builder.poisonValue(llvm_operand_ty),
try o.builder.vectorValue(llvm_mask_ty, llvm_elem_buf),
"",
);
if (!any_defined_comptime_value) {
// `comptime_and_poison` is just poison; a second shuffle would be a nop.
return runtime_and_poison;
}
// In this second shuffle, the inputs, the mask, and the output all have the same length.
for (mask, llvm_elem_buf, 0..) |mask_elem, *llvm_elem, elem_idx| {
llvm_elem.* = switch (mask_elem.unwrap()) {
.elem => try o.builder.intConst(.i32, elem_idx),
.value => |val| if (!Value.fromInterned(val).isUndef(zcu)) mask_val: {
break :mask_val try o.builder.intConst(.i32, mask.len + elem_idx);
} else llvm_poison_mask_elem,
};
}
// Merge the runtime and comptime elements with the mask we just built.
return fg.wip.shuffleVector(
runtime_and_poison,
comptime_and_poison,
try o.builder.vectorValue(llvm_mask_ty, llvm_elem_buf),
"",
);
}
fn airShuffleTwo(fg: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = fg.ng.object;
const pt = o.pt;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const unwrapped = fg.air.unwrapShuffleTwo(zcu, inst);
const mask = unwrapped.mask;
const llvm_elem_ty = try o.lowerType(unwrapped.result_ty.childType(zcu));
const llvm_mask_ty = try o.builder.vectorType(.normal, @intCast(mask.len), .i32);
const llvm_poison_mask_elem = try o.builder.poisonConst(.i32);
// This is somewhat simpler than `airShuffleOne`. We extend the shorter vector to the
// length of the longer one with an initial `shufflevector` if necessary, and then do the
// actual computation with a second `shufflevector`.
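// Illustrative sketch (not part of this diff): extending a 2-element operand
// to match a 4-element one uses a mask of <0, 1, poison, poison>:
//   %a_ext = shufflevector <2 x f32> %a, <2 x f32> poison,
//                          <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
// after which both operands can feed a single final shufflevector.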
const operand_a_len = fg.typeOf(unwrapped.operand_a).vectorLen(zcu);
const operand_b_len = fg.typeOf(unwrapped.operand_b).vectorLen(zcu);
const operand_len: u32 = @max(operand_a_len, operand_b_len);
// If we need to extend an operand, this is the type that mask will have.
const llvm_operand_mask_ty = try o.builder.vectorType(.normal, operand_len, .i32);
const llvm_elem_buf = try gpa.alloc(Builder.Constant, @max(mask.len, operand_len));
defer gpa.free(llvm_elem_buf);
const operand_a: Builder.Value = extend: {
const raw = try fg.resolveInst(unwrapped.operand_a);
if (operand_a_len == operand_len) break :extend raw;
// Extend with a `shufflevector`, with a mask `<0, 1, ..., n, poison, poison, ..., poison>`
const mask_elems = llvm_elem_buf[0..operand_len];
for (mask_elems[0..operand_a_len], 0..) |*llvm_elem, elem_idx| {
llvm_elem.* = try o.builder.intConst(.i32, elem_idx);
}
@memset(mask_elems[operand_a_len..], llvm_poison_mask_elem);
const llvm_this_operand_ty = try o.builder.vectorType(.normal, operand_a_len, llvm_elem_ty);
break :extend try fg.wip.shuffleVector(
raw,
try o.builder.poisonValue(llvm_this_operand_ty),
try o.builder.vectorValue(llvm_operand_mask_ty, mask_elems),
"",
);
};
const operand_b: Builder.Value = extend: {
const raw = try fg.resolveInst(unwrapped.operand_b);
if (operand_b_len == operand_len) break :extend raw;
// Extend with a `shufflevector`, with a mask `<0, 1, ..., n, poison, poison, ..., poison>`
const mask_elems = llvm_elem_buf[0..operand_len];
for (mask_elems[0..operand_b_len], 0..) |*llvm_elem, elem_idx| {
llvm_elem.* = try o.builder.intConst(.i32, elem_idx);
}
@memset(mask_elems[operand_b_len..], llvm_poison_mask_elem);
const llvm_this_operand_ty = try o.builder.vectorType(.normal, operand_b_len, llvm_elem_ty);
break :extend try fg.wip.shuffleVector(
raw,
try o.builder.poisonValue(llvm_this_operand_ty),
try o.builder.vectorValue(llvm_operand_mask_ty, mask_elems),
"",
);
};
// `operand_a` and `operand_b` now have the same length (we've extended the shorter one with
// an initial shuffle if necessary). Now for the easy bit.
const mask_elems = llvm_elem_buf[0..mask.len];
for (mask, mask_elems) |mask_elem, *llvm_mask_elem| {
llvm_mask_elem.* = switch (mask_elem.unwrap()) {
.a_elem => |idx| try o.builder.intConst(.i32, idx),
.b_elem => |idx| try o.builder.intConst(.i32, operand_len + idx),
.undef => llvm_poison_mask_elem,
};
}
return fg.wip.shuffleVector(
operand_a,
operand_b,
try o.builder.vectorValue(llvm_mask_ty, mask_elems),
"",
);
return self.wip.shuffleVector(a, b, llvm_mask_value, "");
}
/// Reduce a vector by repeatedly applying `llvm_fn` to produce an accumulated result.

View File

@ -28,6 +28,15 @@ const SpvAssembler = @import("spirv/Assembler.zig");
const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
pub fn legalizeFeatures(_: *const std.Target) *const Air.Legalize.Features {
return comptime &.initMany(&.{
.expand_intcast_safe,
.expand_add_safe,
.expand_sub_safe,
.expand_mul_safe,
});
}
pub const zig_call_abi_ver = 3;
pub const big_int_bits = 32;
@ -3243,7 +3252,8 @@ const NavGen = struct {
.splat => try self.airSplat(inst),
.reduce, .reduce_optimized => try self.airReduce(inst),
.shuffle => try self.airShuffle(inst),
.shuffle_one => try self.airShuffleOne(inst),
.shuffle_two => try self.airShuffleTwo(inst),
.ptr_add => try self.airPtrAdd(inst),
.ptr_sub => try self.airPtrSub(inst),
@ -3380,6 +3390,10 @@ const NavGen = struct {
const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
if (self.typeOf(bin_op.lhs).isVector(zcu) and !self.typeOf(bin_op.rhs).isVector(zcu)) {
return self.fail("vector shift with scalar rhs", .{});
}
const base = try self.temporary(bin_op.lhs);
const shift = try self.temporary(bin_op.rhs);
@ -3866,6 +3880,10 @@ const NavGen = struct {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.typeOf(extra.lhs).isVector(zcu) and !self.typeOf(extra.rhs).isVector(zcu)) {
return self.fail("vector shift with scalar rhs", .{});
}
const base = try self.temporary(extra.lhs);
const shift = try self.temporary(extra.rhs);
@ -4030,40 +4048,57 @@ const NavGen = struct {
return result_id;
}
fn airShuffle(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
fn airShuffleOne(ng: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = ng.pt;
const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolve(extra.a);
const b = try self.resolve(extra.b);
const mask = Value.fromInterned(extra.mask);
const gpa = zcu.gpa;
// Note: number of components in the result, a, and b may differ.
const result_ty = self.typeOfIndex(inst);
const scalar_ty = result_ty.scalarType(zcu);
const scalar_ty_id = try self.resolveType(scalar_ty, .direct);
const unwrapped = ng.air.unwrapShuffleOne(zcu, inst);
const mask = unwrapped.mask;
const result_ty = unwrapped.result_ty;
const elem_ty = result_ty.childType(zcu);
const operand = try ng.resolve(unwrapped.operand);
const constituents = try self.gpa.alloc(IdRef, result_ty.vectorLen(zcu));
defer self.gpa.free(constituents);
const constituents = try gpa.alloc(IdRef, mask.len);
defer gpa.free(constituents);
for (constituents, 0..) |*id, i| {
const elem = try mask.elemValue(pt, i);
if (elem.isUndef(zcu)) {
id.* = try self.spv.constUndef(scalar_ty_id);
continue;
for (constituents, mask) |*id, mask_elem| {
id.* = switch (mask_elem.unwrap()) {
.elem => |idx| try ng.extractVectorComponent(elem_ty, operand, idx),
.value => |val| try ng.constant(elem_ty, .fromInterned(val), .direct),
};
}
const index = elem.toSignedInt(zcu);
if (index >= 0) {
id.* = try self.extractVectorComponent(scalar_ty, a, @intCast(index));
} else {
id.* = try self.extractVectorComponent(scalar_ty, b, @intCast(~index));
}
const result_ty_id = try ng.resolveType(result_ty, .direct);
return try ng.constructComposite(result_ty_id, constituents);
}
const result_ty_id = try self.resolveType(result_ty, .direct);
return try self.constructComposite(result_ty_id, constituents);
fn airShuffleTwo(ng: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = ng.pt;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const unwrapped = ng.air.unwrapShuffleTwo(zcu, inst);
const mask = unwrapped.mask;
const result_ty = unwrapped.result_ty;
const elem_ty = result_ty.childType(zcu);
const elem_ty_id = try ng.resolveType(elem_ty, .direct);
const operand_a = try ng.resolve(unwrapped.operand_a);
const operand_b = try ng.resolve(unwrapped.operand_b);
const constituents = try gpa.alloc(IdRef, mask.len);
defer gpa.free(constituents);
for (constituents, mask) |*id, mask_elem| {
id.* = switch (mask_elem.unwrap()) {
.a_elem => |idx| try ng.extractVectorComponent(elem_ty, operand_a, idx),
.b_elem => |idx| try ng.extractVectorComponent(elem_ty, operand_b, idx),
.undef => try ng.spv.constUndef(elem_ty_id),
};
}
const result_ty_id = try ng.resolveType(result_ty, .direct);
return try ng.constructComposite(result_ty_id, constituents);
}
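// Illustrative sketch (not part of this diff): this lowering fully unrolls
// the shuffle, so a mask like [a_elem 1, b_elem 0] presumably becomes two
// OpCompositeExtract instructions feeding a single OpCompositeConstruct
// that assembles the result vector.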
fn indicesToIds(self: *NavGen, indices: []const u32) ![]IdRef {

View File

@ -1,5 +1,8 @@
pub const Env = enum {
/// zig1 features
/// - `-ofmt=c` only
/// - `-OReleaseFast` or `-OReleaseSmall` only
/// - no `@setRuntimeSafety(true)`
bootstrap,
/// zig2 features
@ -67,6 +70,7 @@ pub const Env = enum {
.incremental,
.ast_gen,
.sema,
.legalize,
.llvm_backend,
.c_backend,
.wasm_backend,
@ -144,6 +148,7 @@ pub const Env = enum {
.build_command,
.stdio_listen,
.incremental,
.legalize,
.x86_64_backend,
.elf_linker,
=> true,
@ -222,6 +227,7 @@ pub const Feature = enum {
incremental,
ast_gen,
sema,
legalize,
llvm_backend,
c_backend,

View File

@ -260,7 +260,7 @@ pub const MutableValue = union(enum) {
const ptr = try arena.create(MutableValue);
const len = try arena.create(MutableValue);
ptr.* = .{ .interned = try pt.intern(.{ .undef = ip.slicePtrType(ty_ip) }) };
len.* = .{ .interned = try pt.intern(.{ .undef = .usize_type }) };
len.* = .{ .interned = .undef_usize };
mv.* = .{ .slice = .{
.ty = ty_ip,
.ptr = ptr,
@ -464,7 +464,7 @@ pub const MutableValue = union(enum) {
return switch (field_idx) {
Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(pt.zcu).toIntern() },
Value.slice_len_index => .{ .interned = switch (pt.zcu.intern_pool.indexToKey(ip_index)) {
.undef => try pt.intern(.{ .undef = .usize_type }),
.undef => .undef_usize,
.slice => |s| s.len,
else => unreachable,
} },
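// Illustrative note (not part of this diff): `.undef_usize` is assumed to be
// a preinterned InternPool index, letting these sites drop the
// `pt.intern(.{ .undef = .usize_type })` round-trip shown in the removed
// lines.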

View File

@ -315,7 +315,8 @@ const Writer = struct {
.wasm_memory_grow => try w.writeWasmMemoryGrow(s, inst),
.mul_add => try w.writeMulAdd(s, inst),
.select => try w.writeSelect(s, inst),
.shuffle => try w.writeShuffle(s, inst),
.shuffle_one => try w.writeShuffleOne(s, inst),
.shuffle_two => try w.writeShuffleTwo(s, inst),
.reduce, .reduce_optimized => try w.writeReduce(s, inst),
.cmp_vector, .cmp_vector_optimized => try w.writeCmpVector(s, inst),
.vector_store_elem => try w.writeVectorStoreElem(s, inst),
@ -499,14 +500,39 @@ const Writer = struct {
try w.writeOperand(s, inst, 2, pl_op.operand);
}
fn writeShuffle(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.Shuffle, ty_pl.payload).data;
try w.writeOperand(s, inst, 0, extra.a);
fn writeShuffleOne(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const unwrapped = w.air.unwrapShuffleOne(w.pt.zcu, inst);
try w.writeType(s, unwrapped.result_ty);
try s.writeAll(", ");
try w.writeOperand(s, inst, 1, extra.b);
try s.print(", mask {d}, len {d}", .{ extra.mask, extra.mask_len });
try w.writeOperand(s, inst, 0, unwrapped.operand);
try s.writeAll(", [");
for (unwrapped.mask, 0..) |mask_elem, mask_idx| {
if (mask_idx > 0) try s.writeAll(", ");
switch (mask_elem.unwrap()) {
.elem => |idx| try s.print("elem {d}", .{idx}),
.value => |val| try s.print("val {}", .{Value.fromInterned(val).fmtValue(w.pt)}),
}
}
try s.writeByte(']');
}
fn writeShuffleTwo(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const unwrapped = w.air.unwrapShuffleTwo(w.pt.zcu, inst);
try w.writeType(s, unwrapped.result_ty);
try s.writeAll(", ");
try w.writeOperand(s, inst, 0, unwrapped.operand_a);
try s.writeAll(", ");
try w.writeOperand(s, inst, 1, unwrapped.operand_b);
try s.writeAll(", [");
for (unwrapped.mask, 0..) |mask_elem, mask_idx| {
if (mask_idx > 0) try s.writeAll(", ");
switch (mask_elem.unwrap()) {
.a_elem => |idx| try s.print("a_elem {d}", .{idx}),
.b_elem => |idx| try s.print("b_elem {d}", .{idx}),
.undef => try s.writeAll("undef"),
}
}
try s.writeByte(']');
}
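// Illustrative sketch (not part of this diff): with these writers, a
// shuffle_two mask renders roughly as
//   @Vector(4, u8), %2, %3, [a_elem 0, b_elem 1, undef, a_elem 3]
// making each result element's provenance visible in AIR dumps.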
fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {

View File

@ -842,17 +842,9 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, compt
.stage2_c, .stage2_llvm, .stage2_x86_64 => true,
else => false,
},
.safety_checked_instructions => switch (backend) {
.stage2_llvm => true,
else => false,
},
.separate_thread => switch (backend) {
.stage2_llvm => false,
else => true,
},
.all_vector_instructions => switch (backend) {
.stage2_x86_64 => true,
else => false,
},
};
}

View File

@ -481,6 +481,7 @@
zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t);
zig_extern void *memset (void *, int, size_t);
zig_extern void *memmove (void *, void const *, size_t);
/* ================ Bool and 8/16/32/64-bit Integer Support ================= */
@ -1114,14 +1115,15 @@ static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
\
static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
uint##w##_t res; \
if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \
return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \
if (rhs < bits && !zig_shlo_u##w(&res, lhs, rhs, bits)) return res; \
return lhs == INT##w##_C(0) ? INT##w##_C(0) : zig_maxInt_u(w, bits); \
} \
\
static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
static inline int##w##_t zig_shls_i##w(int##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
int##w##_t res; \
if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \
return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
if (rhs < bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \
return lhs == INT##w##_C(0) ? INT##w##_C(0) : \
lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
@ -1850,15 +1852,23 @@ static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8
static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0))
return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs;
return zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits) ? zig_maxInt_u(128, bits) : res;
if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits)) return res;
switch (zig_cmp_u128(lhs, zig_make_u128(0, 0))) {
case 0: return zig_make_u128(0, 0);
case 1: return zig_maxInt_u(128, bits);
default: zig_unreachable();
}
}
static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_u128 rhs, uint8_t bits) {
zig_i128 res;
if (zig_cmp_u128(zig_bitCast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res;
return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits)) return res;
switch (zig_cmp_i128(lhs, zig_make_i128(0, 0))) {
case -1: return zig_minInt_i(128, bits);
case 0: return zig_make_i128(0, 0);
case 1: return zig_maxInt_i(128, bits);
default: zig_unreachable();
}
}
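/* Illustrative note (not part of this diff): the rewritten saturating shifts
   return zero for a zero lhs instead of saturating, e.g.
   zig_shls_i128(zig_make_i128(0, 0), zig_make_u128(0, 200), 128) now yields
   zig_make_i128(0, 0) where the old code saturated to zig_maxInt_i(128, 128);
   a negative lhs still saturates to zig_minInt_i(128, 128). */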
static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {

Binary file not shown.

View File

@ -96,7 +96,6 @@ test "@abs big int <= 128 bits" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime testAbsSignedBigInt();
try testAbsSignedBigInt();
@ -211,7 +210,6 @@ fn testAbsFloats(comptime T: type) !void {
test "@abs int vectors" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@ -837,7 +837,6 @@ test "extern variable with non-pointer opaque type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@export(&var_to_export, .{ .name = "opaque_extern_var" });

View File

@ -384,7 +384,6 @@ test "comptime bitcast with fields following f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const FloatT = extern struct { f: f80, x: u128 align(16) };
const x: FloatT = .{ .f = 0.5, .x = 123 };

View File

@ -12,7 +12,6 @@ test "@bitReverse" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testBitReverse();
@ -123,7 +122,6 @@ fn vector8() !void {
test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -144,7 +142,6 @@ fn vector16() !void {
test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -165,7 +162,6 @@ fn vector24() !void {
test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@ -39,7 +39,6 @@ test "@byteSwap integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const ByteSwapIntTest = struct {
fn run() !void {
@ -95,7 +94,6 @@ fn vector8() !void {
test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -116,7 +114,6 @@ fn vector16() !void {
test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -137,7 +134,6 @@ fn vector24() !void {
test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@ -617,7 +617,6 @@ test "@intCast on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
@ -2520,7 +2519,6 @@ test "@ptrFromInt on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
@ -2592,7 +2590,6 @@ test "@intFromFloat on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -2693,7 +2690,6 @@ test "@intCast vector of signed integer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;

View File

@ -5,7 +5,6 @@ const expect = std.testing.expect;
test "anyopaque extern symbol" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const a = @extern(*anyopaque, .{ .name = "a_mystery_symbol" });

View File

@ -14,9 +14,11 @@ fn epsForType(comptime T: type) T {
}
test "add f16" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
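// Illustrative note (not part of this diff): rather than skipping every
// non-ELF/Mach-O object format, these tests now skip only when the
// self-hosted x86_64 backend targets COFF without the CPU feature the
// test needs (f16c here, sse4_1 for the rounding tests below).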
try testAdd(f16);
try comptime testAdd(f16);
}
@ -123,10 +125,12 @@ fn testMul(comptime T: type) !void {
test "cmp f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
try testCmp(f16);
try comptime testCmp(f16);
}
@ -135,7 +139,6 @@ test "cmp f32" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testCmp(f32);
try comptime testCmp(f32);
@ -144,7 +147,6 @@ test "cmp f32" {
test "cmp f64" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
try testCmp(f64);
@ -340,9 +342,11 @@ test "different sized float comparisons" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
try testDifferentSizedFloatComparisons();
try comptime testDifferentSizedFloatComparisons();
}
@ -388,10 +392,12 @@ test "@sqrt f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
try testSqrt(f16);
try comptime testSqrt(f16);
}
@ -400,7 +406,6 @@ test "@sqrt f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
try testSqrt(f32);
@ -1132,9 +1137,11 @@ test "@abs f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
try testFabs(f16);
try comptime testFabs(f16);
}
@ -1266,9 +1273,11 @@ test "@floor f32/f64" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
try testFloor(f32);
try comptime testFloor(f32);
try testFloor(f64);
@ -1332,7 +1341,9 @@ test "@floor with vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
try testFloorWithVectors();
try comptime testFloorWithVectors();
@ -1363,9 +1374,11 @@ test "@ceil f32/f64" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
try testCeil(f32);
try comptime testCeil(f32);
try testCeil(f64);
@ -1429,7 +1442,9 @@ test "@ceil with vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
try testCeilWithVectors();
try comptime testCeilWithVectors();
@ -1460,9 +1475,11 @@ test "@trunc f32/f64" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
try testTrunc(f32);
try comptime testTrunc(f32);
try testTrunc(f64);
@ -1526,7 +1543,9 @@ test "@trunc with vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
try testTruncWithVectors();
try comptime testTruncWithVectors();
@ -1546,9 +1565,11 @@ test "neg f16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
if (builtin.os.tag == .freebsd) {
// TODO file issue to track this failure
return error.SkipZigTest;

View File

@ -429,7 +429,6 @@ test "implicit cast function to function ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S1 = struct {
export fn someFunctionThatReturnsAValue() c_int {

View File

@ -85,7 +85,6 @@ test "@clz big ints" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testClzBigInts();
@ -103,7 +102,6 @@ fn testOneClz(comptime T: type, x: T) u32 {
test "@clz vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -173,7 +171,6 @@ fn testOneCtz(comptime T: type, x: T) u32 {
}
test "@ctz 128-bit integers" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -193,7 +190,6 @@ fn testCtz128() !void {
test "@ctz vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -475,10 +471,12 @@ test "division" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
try testIntDivision();
try comptime testIntDivision();
@ -1623,10 +1621,10 @@ test "vector integer addition" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1694,9 +1692,6 @@ test "vector comparison" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var a: @Vector(6, i32) = [_]i32{ 1, 3, -1, 5, 7, 9 };
@ -1785,7 +1780,6 @@ test "mod lazy values" {
test "@clz works on both vector and scalar inputs" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1807,7 +1801,6 @@ test "runtime comparison to NaN is comptime-known" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
@ -1838,7 +1831,6 @@ test "runtime int comparison to inf is comptime-known" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
@ -1936,7 +1928,9 @@ test "float vector division of comptime zero by runtime nan is nan" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
const ct_zero: @Vector(1, f32) = .{0};
var rt_nan: @Vector(1, f32) = .{math.nan(f32)};

View File

@ -34,7 +34,6 @@ test "@max on vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -90,7 +89,6 @@ test "@min for vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -206,7 +204,6 @@ test "@min/@max notices vector bounds" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
var x: @Vector(2, u16) = .{ 140, 40 };
const y: @Vector(2, u64) = .{ 5, 100 };
@ -260,7 +257,6 @@ test "@min/@max notices bounds from vector types" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
var x: @Vector(2, u16) = .{ 30, 67 };
var y: @Vector(2, u32) = .{ 20, 500 };
@ -303,7 +299,6 @@ test "@min/@max notices bounds from vector types when element of comptime-known
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
var x: @Vector(2, u32) = .{ 1_000_000, 12345 };
_ = &x;
@ -375,7 +370,6 @@ test "@min/@max with runtime vectors of signed and unsigned integers of same siz
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn min(a: @Vector(2, i32), b: @Vector(2, u32)) @Vector(2, i32) {

View File

@ -6,10 +6,12 @@ test "@mulAdd" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest;
try comptime testMulAdd();
try testMulAdd();
}
@ -137,10 +139,12 @@ test "vector f32" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest;
try comptime vector32();
try vector32();
}
@ -163,10 +167,12 @@ test "vector f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest;
try comptime vector64();
try vector64();
}

View File

@ -1307,6 +1307,17 @@ test "packed struct equality" {
comptime try S.doTest(x, y);
}
test "packed struct equality ignores padding bits" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = packed struct { b: bool };
var s: S = undefined;
s.b = true;
try std.testing.expect(s != S{ .b = false });
try std.testing.expect(s == S{ .b = true });
}
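// Editor's note, not part of this change: @bitSizeOf(S) == 1, but `s` occupies
// at least a byte of storage, so its 7 padding bits stay undefined after
// `s.b = true`; the expectations above hold only because `==` on a packed
// struct compares the declared field bits and ignores that padding.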
test "packed struct with signed field" {
var s: packed struct {
a: i2,

View File

@ -77,7 +77,6 @@ fn testPopCountIntegers() !void {
test "@popCount vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@ -41,8 +41,6 @@ test "@select arrays" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest;
try comptime selectArrays();
try selectArrays();
@ -70,7 +68,6 @@ fn selectArrays() !void {
test "@select compare result" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
const S = struct {

View File

@ -10,8 +10,6 @@ test "@shuffle int" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .ssse3)) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -53,7 +51,6 @@ test "@shuffle int" {
test "@shuffle int strange sizes" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -136,7 +133,6 @@ fn testShuffle(
test "@shuffle bool 1" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -160,7 +156,6 @@ test "@shuffle bool 1" {
test "@shuffle bool 2" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@ -282,6 +282,7 @@ test "cast union to tag type of union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testCastUnionToTag();
try comptime testCastUnionToTag();

View File

@ -31,7 +31,6 @@ test "vector wrap operators" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -76,12 +75,12 @@ test "vector bin compares with mem.eql" {
test "vector int operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -249,9 +248,11 @@ test "array to vector with element type coercion" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var foo: f16 = 3.14;
@ -286,11 +287,11 @@ test "peer type resolution with coercible element types" {
test "tuple to vector" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -652,7 +653,6 @@ test "vector division operators" {
test "vector bitwise not operator" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -684,12 +684,12 @@ test "vector bitwise not operator" {
test "vector shift operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTestShift(x: anytype, y: anytype) !void {
@ -908,8 +908,6 @@ test "mask parameter of @shuffle is comptime scope" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .ssse3)) return error.SkipZigTest;
const __v4hi = @Vector(4, i16);
var v4_a = __v4hi{ 1, 2, 3, 4 };
@ -934,7 +932,6 @@ test "saturating add" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -969,7 +966,6 @@ test "saturating subtraction" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -989,7 +985,6 @@ test "saturating subtraction" {
test "saturating multiplication" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1018,12 +1013,12 @@ test "saturating multiplication" {
test "saturating shift-left" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1043,12 +1038,12 @@ test "saturating shift-left" {
test "multiplication-assignment operator with an array operand" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1065,7 +1060,6 @@ test "multiplication-assignment operator with an array operand" {
test "@addWithOverflow" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1116,7 +1110,6 @@ test "@addWithOverflow" {
test "@subWithOverflow" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1151,7 +1144,6 @@ test "@subWithOverflow" {
test "@mulWithOverflow" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1175,7 +1167,6 @@ test "@mulWithOverflow" {
test "@shlWithOverflow" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1314,7 +1305,7 @@ test "zero multiplicand" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const zeros = @Vector(2, u32){ 0.0, 0.0 };
var ones = @Vector(2, u32){ 1.0, 1.0 };
@ -1362,7 +1353,6 @@ test "array operands to shuffle are coerced to vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const mask = [5]i32{ -1, 0, 1, 2, 3 };
@ -1469,7 +1459,6 @@ test "compare vectors with different element types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View File

@ -1,5 +1,7 @@
const AddOneBit = math.AddOneBit;
const AsSignedness = math.AsSignedness;
const cast = math.cast;
const ChangeScalar = math.ChangeScalar;
const checkExpected = math.checkExpected;
const Compare = math.Compare;
const DoubleBits = math.DoubleBits;
@ -13,6 +15,7 @@ const math = @import("math.zig");
const nan = math.nan;
const Scalar = math.Scalar;
const sign = math.sign;
const splat = math.splat;
const Sse = math.Sse;
const tmin = math.tmin;
@ -5141,6 +5144,7 @@ inline fn mulSat(comptime Type: type, lhs: Type, rhs: Type) Type {
test mulSat {
const test_mul_sat = binary(mulSat, .{});
try test_mul_sat.testInts();
try test_mul_sat.testIntVectors();
}
inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs * rhs) {
@ -5240,38 +5244,42 @@ test min {
try test_min.testFloatVectors();
}
inline fn addWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } {
inline fn addWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, ChangeScalar(Type, u1) } {
return @addWithOverflow(lhs, rhs);
}
test addWithOverflow {
const test_add_with_overflow = binary(addWithOverflow, .{});
try test_add_with_overflow.testInts();
try test_add_with_overflow.testIntVectors();
}
inline fn subWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } {
inline fn subWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, ChangeScalar(Type, u1) } {
return @subWithOverflow(lhs, rhs);
}
test subWithOverflow {
const test_sub_with_overflow = binary(subWithOverflow, .{});
try test_sub_with_overflow.testInts();
try test_sub_with_overflow.testIntVectors();
}
inline fn mulWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } {
inline fn mulWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, ChangeScalar(Type, u1) } {
return @mulWithOverflow(lhs, rhs);
}
test mulWithOverflow {
const test_mul_with_overflow = binary(mulWithOverflow, .{});
try test_mul_with_overflow.testInts();
try test_mul_with_overflow.testIntVectors();
}
inline fn shlWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } {
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
inline fn shlWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, ChangeScalar(Type, u1) } {
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
return @shlWithOverflow(lhs, if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs);
return @shlWithOverflow(lhs, if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs);
}
test shlWithOverflow {
const test_shl_with_overflow = binary(shlWithOverflow, .{});
try test_shl_with_overflow.testInts();
try test_shl_with_overflow.testIntVectors();
}
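// Editor's sketch of the shift-amount reduction above, not part of this
// change: @truncate to Log2Int already wraps power-of-two bit widths (for u8,
// Log2Int(u8) == u3 covers shifts 0..7, and `cast(u3, 8)` returns null), while
// non-power-of-two widths additionally need the `% bits` step (for u5,
// Log2Int(u5) == u3 can still hold 5..7, so `truncate_rhs % 5` keeps the
// shift amount legal).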
inline fn equal(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs == rhs) {
@ -5280,7 +5288,9 @@ inline fn equal(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs == rhs) {
test equal {
const test_equal = binary(equal, .{});
try test_equal.testInts();
try test_equal.testIntVectors();
try test_equal.testFloats();
try test_equal.testFloatVectors();
}
inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs) {
@ -5289,7 +5299,9 @@ inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs
test notEqual {
const test_not_equal = binary(notEqual, .{});
try test_not_equal.testInts();
try test_not_equal.testIntVectors();
try test_not_equal.testFloats();
try test_not_equal.testFloatVectors();
}
inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs) {
@ -5298,7 +5310,9 @@ inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs)
test lessThan {
const test_less_than = binary(lessThan, .{});
try test_less_than.testInts();
try test_less_than.testIntVectors();
try test_less_than.testFloats();
try test_less_than.testFloatVectors();
}
inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs <= rhs) {
@ -5307,7 +5321,9 @@ inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs
test lessThanOrEqual {
const test_less_than_or_equal = binary(lessThanOrEqual, .{});
try test_less_than_or_equal.testInts();
try test_less_than_or_equal.testIntVectors();
try test_less_than_or_equal.testFloats();
try test_less_than_or_equal.testFloatVectors();
}
inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > rhs) {
@ -5316,7 +5332,9 @@ inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > r
test greaterThan {
const test_greater_than = binary(greaterThan, .{});
try test_greater_than.testInts();
try test_greater_than.testIntVectors();
try test_greater_than.testFloats();
try test_greater_than.testFloatVectors();
}
inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs >= rhs) {
@ -5325,7 +5343,9 @@ inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(
test greaterThanOrEqual {
const test_greater_than_or_equal = binary(greaterThanOrEqual, .{});
try test_greater_than_or_equal.testInts();
try test_greater_than_or_equal.testIntVectors();
try test_greater_than_or_equal.testFloats();
try test_greater_than_or_equal.testFloatVectors();
}
inline fn bitAnd(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs & rhs) {
@ -5347,54 +5367,57 @@ test bitOr {
}
inline fn shr(comptime Type: type, lhs: Type, rhs: Type) Type {
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
return lhs >> if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
return lhs >> if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
}
test shr {
const test_shr = binary(shr, .{});
try test_shr.testInts();
try test_shr.testIntVectors();
}
inline fn shrExact(comptime Type: type, lhs: Type, rhs: Type) Type {
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
const final_rhs = if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
const final_rhs = if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
return @shrExact(lhs >> final_rhs << final_rhs, final_rhs);
}
test shrExact {
const test_shr_exact = binary(shrExact, .{});
try test_shr_exact.testInts();
try test_shr_exact.testIntVectors();
}
inline fn shl(comptime Type: type, lhs: Type, rhs: Type) Type {
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
return lhs << if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
return lhs << if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
}
test shl {
const test_shl = binary(shl, .{});
try test_shl.testInts();
try test_shl.testIntVectors();
}
inline fn shlExactUnsafe(comptime Type: type, lhs: Type, rhs: Type) Type {
@setRuntimeSafety(false);
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
const final_rhs = if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
const final_rhs = if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
return @shlExact(lhs << final_rhs >> final_rhs, final_rhs);
}
test shlExactUnsafe {
const test_shl_exact_unsafe = binary(shlExactUnsafe, .{});
try test_shl_exact_unsafe.testInts();
try test_shl_exact_unsafe.testIntVectors();
}
inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
// workaround https://github.com/ziglang/zig/issues/23034
if (@inComptime()) {
// workaround https://github.com/ziglang/zig/issues/23139
//return lhs <<| @min(@abs(rhs), imax(u64));
return lhs <<| @min(@abs(rhs), @as(u64, imax(u64)));
return lhs <<| @min(@abs(rhs), splat(ChangeScalar(Type, u64), imax(u64)));
}
// workaround https://github.com/ziglang/zig/issues/23033
@setRuntimeSafety(false);
@ -5403,6 +5426,7 @@ inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
test shlSat {
const test_shl_sat = binary(shlSat, .{});
try test_shl_sat.testInts();
try test_shl_sat.testIntVectors();
}
inline fn bitXor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs ^ rhs) {

View File

@ -8,8 +8,6 @@ pub const fmin = math.floatMin;
pub const imax = math.maxInt;
pub const imin = math.minInt;
pub const inf = math.inf;
pub const Log2Int = math.Log2Int;
pub const Log2IntCeil = math.Log2IntCeil;
pub const nan = math.nan;
pub const next = math.nextAfter;
pub const tmin = math.floatTrueMin;
@ -30,38 +28,44 @@ pub fn Scalar(comptime Type: type) type {
.vector => |info| info.child,
};
}
pub fn ChangeScalar(comptime Type: type, comptime NewScalar: type) type {
return switch (@typeInfo(Type)) {
else => NewScalar,
.vector => |vector| @Vector(vector.len, NewScalar),
};
}
pub fn AsSignedness(comptime Type: type, comptime signedness: std.builtin.Signedness) type {
return ChangeScalar(Type, @Type(.{ .int = .{
.signedness = signedness,
.bits = @typeInfo(Scalar(Type)).int.bits,
} }));
}
pub fn AddOneBit(comptime Type: type) type {
const ResultScalar = switch (@typeInfo(Scalar(Type))) {
return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = 1 + int.bits } }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
};
return switch (@typeInfo(Type)) {
else => ResultScalar,
.vector => |vector| @Vector(vector.len, ResultScalar),
};
});
}
pub fn DoubleBits(comptime Type: type) type {
const ResultScalar = switch (@typeInfo(Scalar(Type))) {
return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = int.bits * 2 } }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
};
return switch (@typeInfo(Type)) {
else => ResultScalar,
.vector => |vector| @Vector(vector.len, ResultScalar),
};
});
}
pub fn RoundBitsUp(comptime Type: type, comptime multiple: u16) type {
const ResultScalar = switch (@typeInfo(Scalar(Type))) {
return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = std.mem.alignForward(u16, int.bits, multiple) } }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
};
return switch (@typeInfo(Type)) {
else => ResultScalar,
.vector => |vector| @Vector(vector.len, ResultScalar),
};
});
}
pub fn Log2Int(comptime Type: type) type {
return ChangeScalar(Type, math.Log2Int(Scalar(Type)));
}
pub fn Log2IntCeil(comptime Type: type) type {
return ChangeScalar(Type, math.Log2IntCeil(Scalar(Type)));
}
// inline to avoid a runtime `@splat`
pub inline fn splat(comptime Type: type, scalar: Scalar(Type)) Type {
@ -78,18 +82,12 @@ inline fn select(cond: anytype, lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
else => @compileError(@typeName(@TypeOf(cond))),
};
}
pub fn sign(rhs: anytype) switch (@typeInfo(@TypeOf(rhs))) {
else => bool,
.vector => |vector| @Vector(vector.len, bool),
} {
pub fn sign(rhs: anytype) ChangeScalar(@TypeOf(rhs), bool) {
const ScalarInt = @Type(.{ .int = .{
.signedness = .unsigned,
.bits = @bitSizeOf(Scalar(@TypeOf(rhs))),
} });
const VectorInt = switch (@typeInfo(@TypeOf(rhs))) {
else => ScalarInt,
.vector => |vector| @Vector(vector.len, ScalarInt),
};
const VectorInt = ChangeScalar(@TypeOf(rhs), ScalarInt);
return @as(VectorInt, @bitCast(rhs)) & splat(VectorInt, @as(ScalarInt, 1) << @bitSizeOf(ScalarInt) - 1) != splat(VectorInt, 0);
}
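// Editor's sketch, not part of this change: `sign` reports whether the sign
// bit is set, uniformly for scalars and vectors, e.g.
//   sign(@as(f32, -0.0)) == true
//   sign(@as(i32, 5)) == false
//   sign(@Vector(2, i8){ -1, 1 }) returns .{ true, false }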
fn boolAnd(lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {

View File

@ -4828,6 +4828,7 @@ inline fn ctz(comptime Type: type, rhs: Type) @TypeOf(@ctz(rhs)) {
test ctz {
const test_ctz = unary(ctz, .{});
try test_ctz.testInts();
try test_ctz.testIntVectors();
}
inline fn popCount(comptime Type: type, rhs: Type) @TypeOf(@popCount(rhs)) {
@ -4836,6 +4837,7 @@ inline fn popCount(comptime Type: type, rhs: Type) @TypeOf(@popCount(rhs)) {
test popCount {
const test_pop_count = unary(popCount, .{});
try test_pop_count.testInts();
try test_pop_count.testIntVectors();
}
inline fn byteSwap(comptime Type: type, rhs: Type) RoundBitsUp(Type, 8) {
@ -4844,6 +4846,7 @@ inline fn byteSwap(comptime Type: type, rhs: Type) RoundBitsUp(Type, 8) {
test byteSwap {
const test_byte_swap = unary(byteSwap, .{});
try test_byte_swap.testInts();
try test_byte_swap.testIntVectors();
}
inline fn bitReverse(comptime Type: type, rhs: Type) @TypeOf(@bitReverse(rhs)) {
@ -4852,6 +4855,7 @@ inline fn bitReverse(comptime Type: type, rhs: Type) @TypeOf(@bitReverse(rhs)) {
test bitReverse {
const test_bit_reverse = unary(bitReverse, .{});
try test_bit_reverse.testInts();
try test_bit_reverse.testIntVectors();
}
inline fn sqrt(comptime Type: type, rhs: Type) @TypeOf(@sqrt(rhs)) {

View File

@ -117,9 +117,9 @@ export fn testMutablePointer() void {
// tmp.zig:37:38: note: imported here
// neg_inf.zon:1:1: error: expected type '?u8'
// tmp.zig:57:28: note: imported here
// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_518'
// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_522'
// tmp.zig:62:39: note: imported here
// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_520'
// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_524'
// tmp.zig:67:44: note: imported here
// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_523'
// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_527'
// tmp.zig:72:50: note: imported here

View File

@ -15,6 +15,6 @@ pub export fn entry() void {
// error
//
// :7:25: error: unable to resolve comptime value
// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_492.C' must be comptime-known
// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_496.C' must be comptime-known
// :4:16: note: struct requires comptime because of this field
// :4:16: note: types are not available at runtime

View File

@ -15,8 +15,7 @@ pub const panic = struct {
pub const castToNull = simple_panic.castToNull;
pub const incorrectAlignment = simple_panic.incorrectAlignment;
pub const invalidErrorCode = simple_panic.invalidErrorCode;
pub const castTruncatedData = simple_panic.castTruncatedData;
pub const negativeToUnsigned = simple_panic.negativeToUnsigned;
pub const integerOutOfBounds = simple_panic.integerOutOfBounds;
pub const integerOverflow = simple_panic.integerOverflow;
pub const shlOverflow = simple_panic.shlOverflow;
pub const shrOverflow = simple_panic.shrOverflow;
@ -27,8 +26,6 @@ pub const panic = struct {
pub const shiftRhsTooBig = simple_panic.shiftRhsTooBig;
pub const invalidEnumValue = simple_panic.invalidEnumValue;
pub const forLenMismatch = simple_panic.forLenMismatch;
/// Delete after next zig1.wasm update
pub const memcpyLenMismatch = copyLenMismatch;
pub const copyLenMismatch = simple_panic.copyLenMismatch;
pub const memcpyAlias = simple_panic.memcpyAlias;
pub const noreturnReturned = simple_panic.noreturnReturned;

View File

@ -11,8 +11,7 @@ pub const panic = struct {
pub const castToNull = simple_panic.castToNull;
pub const incorrectAlignment = simple_panic.incorrectAlignment;
pub const invalidErrorCode = simple_panic.invalidErrorCode;
pub const castTruncatedData = simple_panic.castTruncatedData;
pub const negativeToUnsigned = simple_panic.negativeToUnsigned;
pub const integerOutOfBounds = simple_panic.integerOutOfBounds;
pub const integerOverflow = simple_panic.integerOverflow;
pub const shlOverflow = simple_panic.shlOverflow;
pub const shrOverflow = simple_panic.shrOverflow;
@ -23,8 +22,6 @@ pub const panic = struct {
pub const shiftRhsTooBig = simple_panic.shiftRhsTooBig;
pub const invalidEnumValue = simple_panic.invalidEnumValue;
pub const forLenMismatch = simple_panic.forLenMismatch;
/// Delete after next zig1.wasm update
pub const memcpyLenMismatch = copyLenMismatch;
pub const copyLenMismatch = simple_panic.copyLenMismatch;
pub const memcpyAlias = simple_panic.memcpyAlias;
pub const noreturnReturned = simple_panic.noreturnReturned;

View File

@ -16,5 +16,5 @@ pub export fn entry2() void {
//
// :3:6: error: no field or member function named 'copy' in '[]const u8'
// :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})'
// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_496'
// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_500'
// :12:6: note: struct declared here

View File

@ -6,6 +6,6 @@ export fn foo() void {
// error
//
// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_485'
// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_489'
// :3:16: note: struct declared here
// :1:11: note: struct declared here

View File

@ -44,9 +44,9 @@ comptime {
//
// :5:23: error: expected error union type, found 'comptime_int'
// :10:23: error: expected error union type, found '@TypeOf(.{})'
// :15:23: error: expected error union type, found 'tmp.test2__struct_522'
// :15:23: error: expected error union type, found 'tmp.test2__struct_526'
// :15:23: note: struct declared here
// :20:27: error: expected error union type, found 'tmp.test3__struct_524'
// :20:27: error: expected error union type, found 'tmp.test3__struct_528'
// :20:27: note: struct declared here
// :25:23: error: expected error union type, found 'struct { comptime *const [5:0]u8 = "hello" }'
// :31:13: error: expected error union type, found 'u32'

View File

@ -1,14 +1,20 @@
export fn entry() void {
const v: @Vector(4, u32) = [4]u32{ 10, 11, 12, 13 };
const x: @Vector(4, u32) = [4]u32{ 14, 15, 16, 17 };
const z = @shuffle(u32, v, x, [8]i32{ 0, 1, 2, 3, 7, 6, 5, 4 });
_ = z;
export fn foo() void {
// Here, the bad index ('7') is not less than 'b.len', so the error shouldn't have a note suggesting a negative index.
const a: @Vector(4, u32) = .{ 10, 11, 12, 13 };
const b: @Vector(4, u32) = .{ 14, 15, 16, 17 };
_ = @shuffle(u32, a, b, [8]i32{ 0, 1, 2, 3, 7, 6, 5, 4 });
}
export fn bar() void {
// Here, the bad index ('7') *is* less than 'b.len', so the error *should* have a note suggesting a negative index.
const a: @Vector(4, u32) = .{ 10, 11, 12, 13 };
const b: @Vector(9, u32) = .{ 14, 15, 16, 17, 18, 19, 20, 21, 22 };
_ = @shuffle(u32, a, b, [8]i32{ 0, 1, 2, 3, 7, 6, 5, 4 });
}
// error
// backend=stage2
// target=native
//
// :4:41: error: mask index '4' has out-of-bounds selection
// :4:29: note: selected index '7' out of bounds of '@Vector(4, u32)'
// :4:32: note: selections from the second vector are specified with negative numbers
// :5:35: error: mask element at index '4' selects out-of-bounds index
// :5:23: note: index '7' exceeds bounds of '@Vector(4, u32)' given here
// :11:35: error: mask element at index '4' selects out-of-bounds index
// :11:23: note: index '7' exceeds bounds of '@Vector(4, u32)' given here
// :11:26: note: use '~@as(u32, 7)' to index into second vector given here
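// Editor's note, not part of this change: in a @shuffle mask, element `i` of
// the second vector is selected with the negative index `~i`, which is what
// the final note above suggests for index 7.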

View File

@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
if (std.mem.eql(u8, message, "integer does not fit in destination type")) {
std.process.exit(0);
}
std.process.exit(1);

View File

@ -15,5 +15,5 @@ pub fn main() !void {
return error.TestFailed;
}
// run
// backend=llvm
// backend=stage2,llvm
// target=native

View File

@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "attempt to cast negative value to unsigned integer")) {
if (std.mem.eql(u8, message, "integer does not fit in destination type")) {
std.process.exit(0);
}
std.process.exit(1);

View File

@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "attempt to cast negative value to unsigned integer")) {
if (std.mem.eql(u8, message, "integer does not fit in destination type")) {
std.process.exit(0);
}
std.process.exit(1);

View File

@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "attempt to cast negative value to unsigned integer")) {
if (std.mem.eql(u8, message, "integer does not fit in destination type")) {
std.process.exit(0);
}
std.process.exit(1);

View File

@ -23,4 +23,5 @@ pub fn panic(message: []const u8, _: ?*std.builtin.StackTrace, _: ?usize) noretu
const std = @import("std");
// run
// backend=llvm
// backend=stage2,llvm
// target=x86_64-linux

View File

@ -23,4 +23,5 @@ pub fn panic(message: []const u8, _: ?*std.builtin.StackTrace, _: ?usize) noretu
const std = @import("std");
// run
// backend=llvm
// backend=stage2,llvm
// target=x86_64-linux

View File

@ -23,4 +23,5 @@ pub fn panic(message: []const u8, _: ?*std.builtin.StackTrace, _: ?usize) noretu
const std = @import("std");
// run
// backend=llvm
// backend=stage2,llvm
// target=x86_64-linux

View File

@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
if (std.mem.eql(u8, message, "integer does not fit in destination type")) {
std.process.exit(0);
}
std.process.exit(1);
@ -17,5 +17,5 @@ pub fn main() !void {
}
// run
// backend=llvm
// backend=stage2,llvm
// target=native

View File

@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
if (std.mem.eql(u8, message, "integer does not fit in destination type")) {
std.process.exit(0);
}
std.process.exit(1);

View File

@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
if (std.mem.eql(u8, message, "integer does not fit in destination type")) {
std.process.exit(0);
}
std.process.exit(1);
@ -17,5 +17,5 @@ pub fn main() !void {
}
// run
// backend=llvm
// backend=stage2,llvm
// target=native

View File

@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
if (std.mem.eql(u8, message, "integer does not fit in destination type")) {
std.process.exit(0);
}
std.process.exit(1);

View File

@ -2,7 +2,7 @@ const std = @import("std");
pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usize) noreturn {
_ = stack_trace;
if (std.mem.eql(u8, message, "integer cast truncated bits")) {
if (std.mem.eql(u8, message, "integer does not fit in destination type")) {
std.process.exit(0);
}
std.process.exit(1);

View File

@ -18,5 +18,5 @@ fn add(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
return a + b;
}
// run
// backend=llvm
// backend=stage2,llvm
// target=native

View File

@ -18,5 +18,5 @@ fn mul(a: @Vector(4, u8), b: @Vector(4, u8)) @Vector(4, u8) {
return a * b;
}
// run
// backend=llvm
// backend=stage2,llvm
// target=native

View File

@ -18,5 +18,5 @@ fn neg(a: @Vector(4, i16)) @Vector(4, i16) {
return -a;
}
// run
// backend=llvm
// backend=stage2,llvm
// target=native

View File

@ -18,5 +18,5 @@ fn sub(a: @Vector(4, u32), b: @Vector(4, u32)) @Vector(4, u32) {
return a - b;
}
// run
// backend=llvm
// backend=stage2,llvm
// target=native

View File

@ -26,8 +26,7 @@ pub const panic = struct {
pub const castToNull = no_panic.castToNull;
pub const incorrectAlignment = no_panic.incorrectAlignment;
pub const invalidErrorCode = no_panic.invalidErrorCode;
pub const castTruncatedData = no_panic.castTruncatedData;
pub const negativeToUnsigned = no_panic.negativeToUnsigned;
pub const integerOutOfBounds = no_panic.integerOutOfBounds;
pub const shlOverflow = no_panic.shlOverflow;
pub const shrOverflow = no_panic.shrOverflow;
pub const divideByZero = no_panic.divideByZero;
@ -37,8 +36,6 @@ pub const panic = struct {
pub const shiftRhsTooBig = no_panic.shiftRhsTooBig;
pub const invalidEnumValue = no_panic.invalidEnumValue;
pub const forLenMismatch = no_panic.forLenMismatch;
/// Delete after next zig1.wasm update
pub const memcpyLenMismatch = copyLenMismatch;
pub const copyLenMismatch = no_panic.copyLenMismatch;
pub const memcpyAlias = no_panic.memcpyAlias;
pub const noreturnReturned = no_panic.noreturnReturned;
@ -75,8 +72,7 @@ pub const panic = struct {
pub const castToNull = no_panic.castToNull;
pub const incorrectAlignment = no_panic.incorrectAlignment;
pub const invalidErrorCode = no_panic.invalidErrorCode;
pub const castTruncatedData = no_panic.castTruncatedData;
pub const negativeToUnsigned = no_panic.negativeToUnsigned;
pub const integerOutOfBounds = no_panic.integerOutOfBounds;
pub const shlOverflow = no_panic.shlOverflow;
pub const shrOverflow = no_panic.shrOverflow;
pub const divideByZero = no_panic.divideByZero;
@ -86,8 +82,6 @@ pub const panic = struct {
pub const shiftRhsTooBig = no_panic.shiftRhsTooBig;
pub const invalidEnumValue = no_panic.invalidEnumValue;
pub const forLenMismatch = no_panic.forLenMismatch;
/// Delete after next zig1.wasm update
pub const memcpyLenMismatch = copyLenMismatch;
pub const copyLenMismatch = no_panic.copyLenMismatch;
pub const memcpyAlias = no_panic.memcpyAlias;
pub const noreturnReturned = no_panic.noreturnReturned;
@ -124,8 +118,7 @@ pub const panic = struct {
pub const castToNull = no_panic.castToNull;
pub const incorrectAlignment = no_panic.incorrectAlignment;
pub const invalidErrorCode = no_panic.invalidErrorCode;
pub const castTruncatedData = no_panic.castTruncatedData;
pub const negativeToUnsigned = no_panic.negativeToUnsigned;
pub const integerOutOfBounds = no_panic.integerOutOfBounds;
pub const shlOverflow = no_panic.shlOverflow;
pub const shrOverflow = no_panic.shrOverflow;
pub const divideByZero = no_panic.divideByZero;
@ -135,8 +128,6 @@ pub const panic = struct {
pub const shiftRhsTooBig = no_panic.shiftRhsTooBig;
pub const invalidEnumValue = no_panic.invalidEnumValue;
pub const forLenMismatch = no_panic.forLenMismatch;
/// Delete after next zig1.wasm update
pub const memcpyLenMismatch = copyLenMismatch;
pub const copyLenMismatch = no_panic.copyLenMismatch;
pub const memcpyAlias = no_panic.memcpyAlias;
pub const noreturnReturned = no_panic.noreturnReturned;

View File

@ -400,7 +400,7 @@ fn addFromDirInner(
for (targets) |target_query| {
const output = try manifest.trailingLinesSplit(ctx.arena);
try ctx.translate.append(.{
.name = std.fs.path.stem(filename),
.name = try caseNameFromPath(ctx.arena, filename),
.c_frontend = c_frontend,
.target = b.resolveTargetQuery(target_query),
.link_libc = link_libc,
@ -416,7 +416,7 @@ fn addFromDirInner(
for (targets) |target_query| {
const output = try manifest.trailingSplit(ctx.arena);
try ctx.translate.append(.{
.name = std.fs.path.stem(filename),
.name = try caseNameFromPath(ctx.arena, filename),
.c_frontend = c_frontend,
.target = b.resolveTargetQuery(target_query),
.link_libc = link_libc,
@ -454,7 +454,7 @@ fn addFromDirInner(
const next = ctx.cases.items.len;
try ctx.cases.append(.{
.name = std.fs.path.stem(filename),
.name = try caseNameFromPath(ctx.arena, filename),
.import_path = std.fs.path.dirname(filename),
.backend = backend,
.files = .init(ctx.arena),
@ -1138,3 +1138,17 @@ fn knownFileExtension(filename: []const u8) bool {
if (it.next() != null) return false;
return false;
}
/// `path` is a path relative to the root case directory.
/// e.g. `compile_errors/undeclared_identifier.zig`
/// The case name is computed by removing the extension and replacing path separators with dots.
/// e.g. `compile_errors.undeclared_identifier`
/// Including the directory components makes `-Dtest-filter` more useful, because you can filter
/// based on subdirectory; e.g. `-Dtest-filter=compile_errors` to run the compile error tests.
fn caseNameFromPath(arena: Allocator, path: []const u8) Allocator.Error![]const u8 {
const ext_len = std.fs.path.extension(path).len;
const path_sans_ext = path[0 .. path.len - ext_len];
const result = try arena.dupe(u8, path_sans_ext);
std.mem.replaceScalar(u8, result, std.fs.path.sep, '.');
return result;
}
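// Editor's sketch, not part of this change:
//   caseNameFromPath(arena, "compile_errors/undeclared_identifier.zig")
// returns "compile_errors.undeclared_identifier" (on targets where '/' is the
// path separator).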

View File

@ -601,7 +601,8 @@ type_tag_handlers = {
'fn_void_no_args': lambda payload: 'fn() void',
'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.naked) noreturn',
'fn_ccc_void_no_args': lambda payload: 'fn() callconv(.c) void',
'single_const_pointer_to_comptime_int': lambda payload: '*const comptime_int',
'ptr_usize': lambda payload: '*usize',
'ptr_const_comptime_int': lambda payload: '*const comptime_int',
'manyptr_u8': lambda payload: '[*]u8',
'manyptr_const_u8': lambda payload: '[*]const u8',
'manyptr_const_u8_sentinel_0': lambda payload: '[*:0]const u8',