Merge pull request #23834 from jacobly0/x86_64-rewrite

x86_64: finish rewriting scalar overflow and saturate operations
This commit is contained in:
Andrew Kelley 2025-05-18 14:36:33 -04:00 committed by GitHub
commit b77e601342
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
35 changed files with 37823 additions and 7779 deletions

View File

@ -402,7 +402,7 @@ pub const ExceptionFrameHeader = struct {
}
}
if (len == 0) return bad();
if (len == 0) return missing();
fbr.pos = left * entry_size;
// Read past the pc_begin field of the entry
@ -460,6 +460,8 @@ pub const ExceptionFrameHeader = struct {
@sizeOf(usize),
native_endian,
);
if (pc < fde.pc_begin or pc >= fde.pc_begin + fde.pc_range) return missing();
}
};

View File

@ -1633,7 +1633,7 @@ pub fn unwindFrameDwarf(
&cie,
&fde,
) catch |err| switch (err) {
error.InvalidDebugInfo => {
error.MissingDebugInfo => {
// `.eh_frame_hdr` appears to be incomplete, so go ahead and populate `cie_map`
// and `fde_list`, and fall back to the binary search logic below.
try di.scanCieFdeInfo(allocator, base_address);

View File

@ -774,18 +774,15 @@ pub fn Log2IntCeil(comptime T: type) type {
/// Returns the smallest integer type that can hold both from and to.
pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) type {
assert(from <= to);
if (from == 0 and to == 0) {
return u0;
}
const signedness: std.builtin.Signedness = if (from < 0) .signed else .unsigned;
const largest_positive_integer = @max(if (from < 0) (-from) - 1 else from, to); // two's complement
const base = log2(largest_positive_integer);
const upper = (1 << base) - 1;
var magnitude_bits = if (upper >= largest_positive_integer) base else base + 1;
if (signedness == .signed) {
magnitude_bits += 1;
}
return std.meta.Int(signedness, magnitude_bits);
return @Type(.{ .int = .{
.signedness = signedness,
.bits = @as(u16, @intFromBool(signedness == .signed)) +
switch (if (from < 0) @max(@abs(from) - 1, to) else to) {
0 => 0,
else => |pos_max| 1 + log2(pos_max),
},
} });
}
test IntFittingRange {
@ -1267,6 +1264,19 @@ pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
return @as(Log2Int(T), @intCast(@typeInfo(T).int.bits - 1 - @clz(x)));
}
test log2_int {
// floor(log2(x)) for x = 1, 2, ..., 10.
const expected = [_]u5{ 0, 1, 1, 2, 2, 2, 2, 3, 3, 3 };
for (expected, 1..) |bits, x| {
try testing.expect(log2_int(u32, @intCast(x)) == bits);
}
}
/// Return the log base 2 of integer value x, rounding up to the
/// nearest integer.
pub fn log2_int_ceil(comptime T: type, x: T) Log2IntCeil(T) {

View File

@ -415,12 +415,12 @@ pub const Mutable = struct {
// in the case that scalar happens to be small in magnitude within its type, but it
// is well worth being able to use the stack and not needing an allocator passed in.
// Note that Mutable.init still sets len to calcLimbLen(scalar) in any case.
const limb_len = comptime switch (@typeInfo(@TypeOf(scalar))) {
const limbs_len = comptime switch (@typeInfo(@TypeOf(scalar))) {
.comptime_int => calcLimbLen(scalar),
.int => |info| calcTwosCompLimbCount(info.bits),
else => @compileError("expected scalar to be an int"),
};
var limbs: [limb_len]Limb = undefined;
var limbs: [limbs_len]Limb = undefined;
const operand = init(&limbs, scalar).toConst();
return add(r, a, operand);
}
@ -2454,12 +2454,12 @@ pub const Const = struct {
// in the case that scalar happens to be small in magnitude within its type, but it
// is well worth being able to use the stack and not needing an allocator passed in.
// Note that Mutable.init still sets len to calcLimbLen(scalar) in any case.
const limb_len = comptime switch (@typeInfo(@TypeOf(scalar))) {
const limbs_len = comptime switch (@typeInfo(@TypeOf(scalar))) {
.comptime_int => calcLimbLen(scalar),
.int => |info| calcTwosCompLimbCount(info.bits),
else => @compileError("expected scalar to be an int"),
};
var limbs: [limb_len]Limb = undefined;
var limbs: [limbs_len]Limb = undefined;
const rhs = Mutable.init(&limbs, scalar);
return order(lhs, rhs.toConst());
}

View File

@ -2295,8 +2295,6 @@ test "sat shift-left signed simple positive" {
}
test "sat shift-left signed multi positive" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var x: SignedDoubleLimb = 1;
_ = &x;
@ -2310,8 +2308,6 @@ test "sat shift-left signed multi positive" {
}
test "sat shift-left signed multi negative" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var x: SignedDoubleLimb = -1;
_ = &x;

View File

@ -12,12 +12,10 @@ const expect = std.testing.expect;
/// - log2(nan) = nan
pub fn log2(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
switch (@typeInfo(T)) {
.comptime_float => {
return @as(comptime_float, @log2(x));
},
.float => return @log2(x),
return switch (@typeInfo(T)) {
.comptime_float, .float => @log2(x),
.comptime_int => comptime {
std.debug.assert(x > 0);
var x_shifted = x;
// First, calculate floorPowerOfTwo(x)
var shift_amt = 1;
@ -34,12 +32,15 @@ pub fn log2(x: anytype) @TypeOf(x) {
}
return result;
},
.int => |IntType| switch (IntType.signedness) {
.signed => @compileError("log2 not implemented for signed integers"),
.unsigned => return math.log2_int(T, x),
},
.int => |int_info| math.log2_int(switch (int_info.signedness) {
.signed => @Type(.{ .int = .{
.signedness = .unsigned,
.bits = int_info.bits -| 1,
} }),
.unsigned => T,
}, @intCast(x)),
else => @compileError("log2 not implemented for " ++ @typeName(T)),
}
};
}
test log2 {

View File

@ -2142,7 +2142,7 @@ pub const Inst = struct {
ref_start_index = static_len,
_,
pub const static_len = 97;
pub const static_len = 101;
pub fn toRef(i: Index) Inst.Ref {
return @enumFromInt(@intFromEnum(Index.ref_start_index) + @intFromEnum(i));
@ -2225,6 +2225,7 @@ pub const Inst = struct {
single_const_pointer_to_comptime_int_type,
slice_const_u8_type,
slice_const_u8_sentinel_0_type,
vector_8_i8_type,
vector_16_i8_type,
vector_32_i8_type,
vector_1_u8_type,
@ -2233,8 +2234,10 @@ pub const Inst = struct {
vector_8_u8_type,
vector_16_u8_type,
vector_32_u8_type,
vector_4_i16_type,
vector_8_i16_type,
vector_16_i16_type,
vector_4_u16_type,
vector_8_u16_type,
vector_16_u16_type,
vector_4_i32_type,
@ -2245,6 +2248,7 @@ pub const Inst = struct {
vector_4_i64_type,
vector_2_u64_type,
vector_4_u64_type,
vector_2_u128_type,
vector_4_f16_type,
vector_8_f16_type,
vector_2_f32_type,

View File

@ -1115,14 +1115,15 @@ static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
\
static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
uint##w##_t res; \
if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \
return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \
if (rhs < bits && !zig_shlo_u##w(&res, lhs, rhs, bits)) return res; \
return lhs == INT##w##_C(0) ? INT##w##_C(0) : zig_maxInt_u(w, bits); \
} \
\
static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
static inline int##w##_t zig_shls_i##w(int##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
int##w##_t res; \
if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \
return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
if (rhs < bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \
return lhs == INT##w##_C(0) ? INT##w##_C(0) : \
lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \
@ -1851,15 +1852,23 @@ static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8
static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {
zig_u128 res;
if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0))
return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs;
return zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits) ? zig_maxInt_u(128, bits) : res;
if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits)) return res;
switch (zig_cmp_u128(lhs, zig_make_u128(0, 0))) {
case 0: return zig_make_u128(0, 0);
case 1: return zig_maxInt_u(128, bits);
default: zig_unreachable();
}
}
static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) {
static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_u128 rhs, uint8_t bits) {
zig_i128 res;
if (zig_cmp_u128(zig_bitCast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res;
return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits);
if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits)) return res;
switch (zig_cmp_i128(lhs, zig_make_i128(0, 0))) {
case -1: return zig_minInt_i(128, bits);
case 0: return zig_make_i128(0, 0);
case 1: return zig_maxInt_i(128, bits);
default: zig_unreachable();
}
}
static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) {

View File

@ -257,7 +257,9 @@ pub const Inst = struct {
/// it shifts out any bits that disagree with the resultant sign bit.
/// Uses the `bin_op` field.
shl_exact,
/// Saturating integer shift left. `<<|`
/// Saturating integer shift left. `<<|`. The result is the same type as the `lhs`.
/// The `rhs` must have the same vector shape as the `lhs`, but with any unsigned
/// integer as the scalar type.
/// Uses the `bin_op` field.
shl_sat,
/// Bitwise XOR. `^`
@ -995,6 +997,7 @@ pub const Inst = struct {
single_const_pointer_to_comptime_int_type = @intFromEnum(InternPool.Index.single_const_pointer_to_comptime_int_type),
slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
vector_8_i8_type = @intFromEnum(InternPool.Index.vector_8_i8_type),
vector_16_i8_type = @intFromEnum(InternPool.Index.vector_16_i8_type),
vector_32_i8_type = @intFromEnum(InternPool.Index.vector_32_i8_type),
vector_1_u8_type = @intFromEnum(InternPool.Index.vector_1_u8_type),
@ -1003,8 +1006,10 @@ pub const Inst = struct {
vector_8_u8_type = @intFromEnum(InternPool.Index.vector_8_u8_type),
vector_16_u8_type = @intFromEnum(InternPool.Index.vector_16_u8_type),
vector_32_u8_type = @intFromEnum(InternPool.Index.vector_32_u8_type),
vector_4_i16_type = @intFromEnum(InternPool.Index.vector_4_i16_type),
vector_8_i16_type = @intFromEnum(InternPool.Index.vector_8_i16_type),
vector_16_i16_type = @intFromEnum(InternPool.Index.vector_16_i16_type),
vector_4_u16_type = @intFromEnum(InternPool.Index.vector_4_u16_type),
vector_8_u16_type = @intFromEnum(InternPool.Index.vector_8_u16_type),
vector_16_u16_type = @intFromEnum(InternPool.Index.vector_16_u16_type),
vector_4_i32_type = @intFromEnum(InternPool.Index.vector_4_i32_type),
@ -1015,6 +1020,7 @@ pub const Inst = struct {
vector_4_i64_type = @intFromEnum(InternPool.Index.vector_4_i64_type),
vector_2_u64_type = @intFromEnum(InternPool.Index.vector_2_u64_type),
vector_4_u64_type = @intFromEnum(InternPool.Index.vector_4_u64_type),
vector_2_u128_type = @intFromEnum(InternPool.Index.vector_2_u128_type),
vector_4_f16_type = @intFromEnum(InternPool.Index.vector_4_f16_type),
vector_8_f16_type = @intFromEnum(InternPool.Index.vector_8_f16_type),
vector_2_f32_type = @intFromEnum(InternPool.Index.vector_2_f32_type),

View File

@ -4572,6 +4572,7 @@ pub const Index = enum(u32) {
slice_const_u8_type,
slice_const_u8_sentinel_0_type,
vector_8_i8_type,
vector_16_i8_type,
vector_32_i8_type,
vector_1_u8_type,
@ -4580,8 +4581,10 @@ pub const Index = enum(u32) {
vector_8_u8_type,
vector_16_u8_type,
vector_32_u8_type,
vector_4_i16_type,
vector_8_i16_type,
vector_16_i16_type,
vector_4_u16_type,
vector_8_u16_type,
vector_16_u16_type,
vector_4_i32_type,
@ -4592,6 +4595,7 @@ pub const Index = enum(u32) {
vector_4_i64_type,
vector_2_u64_type,
vector_4_u64_type,
vector_2_u128_type,
vector_4_f16_type,
vector_8_f16_type,
vector_2_f32_type,
@ -5090,6 +5094,8 @@ pub const static_keys = [_]Key{
},
} },
// @Vector(8, i8)
.{ .vector_type = .{ .len = 8, .child = .i8_type } },
// @Vector(16, i8)
.{ .vector_type = .{ .len = 16, .child = .i8_type } },
// @Vector(32, i8)
@ -5106,10 +5112,14 @@ pub const static_keys = [_]Key{
.{ .vector_type = .{ .len = 16, .child = .u8_type } },
// @Vector(32, u8)
.{ .vector_type = .{ .len = 32, .child = .u8_type } },
// @Vector(4, i16)
.{ .vector_type = .{ .len = 4, .child = .i16_type } },
// @Vector(8, i16)
.{ .vector_type = .{ .len = 8, .child = .i16_type } },
// @Vector(16, i16)
.{ .vector_type = .{ .len = 16, .child = .i16_type } },
// @Vector(4, u16)
.{ .vector_type = .{ .len = 4, .child = .u16_type } },
// @Vector(8, u16)
.{ .vector_type = .{ .len = 8, .child = .u16_type } },
// @Vector(16, u16)
@ -5130,6 +5140,8 @@ pub const static_keys = [_]Key{
.{ .vector_type = .{ .len = 2, .child = .u64_type } },
// @Vector(8, u64)
.{ .vector_type = .{ .len = 4, .child = .u64_type } },
// @Vector(2, u128)
.{ .vector_type = .{ .len = 2, .child = .u128_type } },
// @Vector(4, f16)
.{ .vector_type = .{ .len = 4, .child = .f16_type } },
// @Vector(8, f16)
@ -11777,6 +11789,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.vector_8_i8_type,
.vector_16_i8_type,
.vector_32_i8_type,
.vector_1_u8_type,
@ -11785,8 +11798,10 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.vector_8_u8_type,
.vector_16_u8_type,
.vector_32_u8_type,
.vector_4_i16_type,
.vector_8_i16_type,
.vector_16_i16_type,
.vector_4_u16_type,
.vector_8_u16_type,
.vector_16_u16_type,
.vector_4_i32_type,
@ -11797,6 +11812,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.vector_4_i64_type,
.vector_2_u64_type,
.vector_4_u64_type,
.vector_2_u128_type,
.vector_4_f16_type,
.vector_8_f16_type,
.vector_2_f32_type,
@ -12121,6 +12137,7 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.slice_const_u8_sentinel_0_type,
=> .pointer,
.vector_8_i8_type,
.vector_16_i8_type,
.vector_32_i8_type,
.vector_1_u8_type,
@ -12129,8 +12146,10 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.vector_8_u8_type,
.vector_16_u8_type,
.vector_32_u8_type,
.vector_4_i16_type,
.vector_8_i16_type,
.vector_16_i16_type,
.vector_4_u16_type,
.vector_8_u16_type,
.vector_16_u16_type,
.vector_4_i32_type,
@ -12141,6 +12160,7 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.vector_4_i64_type,
.vector_2_u64_type,
.vector_4_u64_type,
.vector_2_u128_type,
.vector_4_f16_type,
.vector_8_f16_type,
.vector_2_f32_type,

View File

@ -14215,14 +14215,15 @@ fn zirShl(
const rhs_ty = sema.typeOf(rhs);
const src = block.nodeOffset(inst_data.src_node);
const lhs_src = switch (air_tag) {
.shl, .shl_sat => block.src(.{ .node_offset_bin_lhs = inst_data.src_node }),
.shl_exact => block.builtinCallArgSrc(inst_data.src_node, 0),
else => unreachable,
};
const rhs_src = switch (air_tag) {
.shl, .shl_sat => block.src(.{ .node_offset_bin_rhs = inst_data.src_node }),
.shl_exact => block.builtinCallArgSrc(inst_data.src_node, 1),
const lhs_src, const rhs_src = switch (air_tag) {
.shl, .shl_sat => .{
block.src(.{ .node_offset_bin_lhs = inst_data.src_node }),
block.src(.{ .node_offset_bin_rhs = inst_data.src_node }),
},
.shl_exact => .{
block.builtinCallArgSrc(inst_data.src_node, 0),
block.builtinCallArgSrc(inst_data.src_node, 1),
},
else => unreachable,
};
@ -14231,8 +14232,7 @@ fn zirShl(
const scalar_ty = lhs_ty.scalarType(zcu);
const scalar_rhs_ty = rhs_ty.scalarType(zcu);
// TODO coerce rhs if air_tag is not shl_sat
const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
_ = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
const maybe_lhs_val = try sema.resolveValueResolveLazy(lhs);
const maybe_rhs_val = try sema.resolveValueResolveLazy(rhs);
@ -14245,7 +14245,7 @@ fn zirShl(
if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return lhs;
}
if (scalar_ty.zigTypeTag(zcu) != .comptime_int and air_tag != .shl_sat) {
if (air_tag != .shl_sat and scalar_ty.zigTypeTag(zcu) != .comptime_int) {
const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits);
if (rhs_ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
@ -14282,6 +14282,8 @@ fn zirShl(
rhs_val.fmtValueSema(pt, sema),
});
}
} else if (scalar_rhs_ty.isSignedInt(zcu)) {
return sema.fail(block, rhs_src, "shift by signed type '{}'", .{rhs_ty.fmt(pt)});
}
const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
@ -14309,18 +14311,34 @@ fn zirShl(
return Air.internedToRef(val.toIntern());
} else lhs_src;
const new_rhs = if (air_tag == .shl_sat) rhs: {
// Limit the RHS type for saturating shl to be an integer as small as the LHS.
if (rhs_is_comptime_int or
scalar_rhs_ty.intInfo(zcu).bits > scalar_ty.intInfo(zcu).bits)
{
const max_int = Air.internedToRef((try lhs_ty.maxInt(pt, lhs_ty)).toIntern());
const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false, false);
} else {
break :rhs rhs;
}
} else rhs;
const rt_rhs = switch (air_tag) {
else => unreachable,
.shl, .shl_exact => rhs,
// The backend can handle a large runtime rhs better than we can, but
// we can limit a large comptime rhs better here. This also has the
// necessary side effect of preventing rhs from being a `comptime_int`.
.shl_sat => if (maybe_rhs_val) |rhs_val| Air.internedToRef(rt_rhs: {
const bit_count = scalar_ty.intInfo(zcu).bits;
const rt_rhs_scalar_ty = try pt.smallestUnsignedInt(bit_count);
if (!rhs_ty.isVector(zcu)) break :rt_rhs (try pt.intValue(
rt_rhs_scalar_ty,
@min(try rhs_val.getUnsignedIntSema(pt) orelse bit_count, bit_count),
)).toIntern();
const rhs_len = rhs_ty.vectorLen(zcu);
const rhs_elems = try sema.arena.alloc(InternPool.Index, rhs_len);
for (rhs_elems, 0..) |*rhs_elem, i| rhs_elem.* = (try pt.intValue(
rt_rhs_scalar_ty,
@min(try (try rhs_val.elemValue(pt, i)).getUnsignedIntSema(pt) orelse bit_count, bit_count),
)).toIntern();
break :rt_rhs try pt.intern(.{ .aggregate = .{
.ty = (try pt.vectorType(.{
.len = rhs_len,
.child = rt_rhs_scalar_ty.toIntern(),
})).toIntern(),
.storage = .{ .elems = rhs_elems },
} });
}) else rhs,
};
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
@ -14374,7 +14392,7 @@ fn zirShl(
return sema.tupleFieldValByIndex(block, op_ov, 0, op_ov_tuple_ty);
}
}
return block.addBinOp(air_tag, lhs, new_rhs);
return block.addBinOp(air_tag, lhs, rt_rhs);
}
fn zirShr(
@ -36432,10 +36450,7 @@ fn generateUnionTagTypeSimple(
const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{
.name = name,
.owner_union_ty = union_type,
.tag_ty = if (enum_field_names.len == 0)
(try pt.intType(.unsigned, 0)).toIntern()
else
(try pt.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(),
.tag_ty = (try pt.smallestUnsignedInt(enum_field_names.len -| 1)).toIntern(),
.names = enum_field_names,
.values = &.{},
.tag_mode = .auto,
@ -36502,6 +36517,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.vector_8_i8_type,
.vector_16_i8_type,
.vector_32_i8_type,
.vector_1_u8_type,
@ -36510,8 +36526,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.vector_8_u8_type,
.vector_16_u8_type,
.vector_32_u8_type,
.vector_4_i16_type,
.vector_8_i16_type,
.vector_16_i16_type,
.vector_4_u16_type,
.vector_8_u16_type,
.vector_16_u16_type,
.vector_4_i32_type,
@ -36522,6 +36540,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.vector_4_i64_type,
.vector_2_u64_type,
.vector_4_u64_type,
.vector_2_u128_type,
.vector_4_f16_type,
.vector_8_f16_type,
.vector_2_f32_type,

View File

@ -4096,6 +4096,7 @@ pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_co
pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type };
pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };
pub const vector_8_i8: Type = .{ .ip_index = .vector_8_i8_type };
pub const vector_16_i8: Type = .{ .ip_index = .vector_16_i8_type };
pub const vector_32_i8: Type = .{ .ip_index = .vector_32_i8_type };
pub const vector_1_u8: Type = .{ .ip_index = .vector_1_u8_type };
@ -4104,8 +4105,10 @@ pub const vector_4_u8: Type = .{ .ip_index = .vector_4_u8_type };
pub const vector_8_u8: Type = .{ .ip_index = .vector_8_u8_type };
pub const vector_16_u8: Type = .{ .ip_index = .vector_16_u8_type };
pub const vector_32_u8: Type = .{ .ip_index = .vector_32_u8_type };
pub const vector_4_i16: Type = .{ .ip_index = .vector_4_i16_type };
pub const vector_8_i16: Type = .{ .ip_index = .vector_8_i16_type };
pub const vector_16_i16: Type = .{ .ip_index = .vector_16_i16_type };
pub const vector_4_u16: Type = .{ .ip_index = .vector_4_u16_type };
pub const vector_8_u16: Type = .{ .ip_index = .vector_8_u16_type };
pub const vector_16_u16: Type = .{ .ip_index = .vector_16_u16_type };
pub const vector_4_i32: Type = .{ .ip_index = .vector_4_i32_type };
@ -4116,6 +4119,7 @@ pub const vector_2_i64: Type = .{ .ip_index = .vector_2_i64_type };
pub const vector_4_i64: Type = .{ .ip_index = .vector_4_i64_type };
pub const vector_2_u64: Type = .{ .ip_index = .vector_2_u64_type };
pub const vector_4_u64: Type = .{ .ip_index = .vector_4_u64_type };
pub const vector_2_u128: Type = .{ .ip_index = .vector_2_u128_type };
pub const vector_4_f16: Type = .{ .ip_index = .vector_4_f16_type };
pub const vector_8_f16: Type = .{ .ip_index = .vector_8_f16_type };
pub const vector_2_f32: Type = .{ .ip_index = .vector_2_f32_type };
@ -4129,10 +4133,10 @@ pub const empty_tuple: Type = .{ .ip_index = .empty_tuple_type };
pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };
pub fn smallestUnsignedBits(max: u64) u16 {
if (max == 0) return 0;
const base = std.math.log2(max);
const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1;
return @as(u16, @intCast(base + @intFromBool(upper < max)));
return switch (max) {
0 => 0,
else => 1 + std.math.log2_int(u64, max),
};
}
/// This is only used for comptime asserts. Bump this number when you make a change

File diff suppressed because it is too large Load Diff

View File

@ -578,6 +578,11 @@ pub const RegisterClass = struct {
for (allocatable_regs, 0..) |reg, index| if (reg.class() == .general_purpose) set.set(index);
break :blk set;
};
/// Bit set of allocatable registers whose low word has an addressable
/// high-byte alias (ah/ch/dh/bh); membership is decided by `Register.hasHi8`.
pub const gphi: RegisterBitSet = blk: {
var set = RegisterBitSet.initEmpty();
for (allocatable_regs, 0..) |reg, index| if (reg.hasHi8()) set.set(index);
break :blk set;
};
pub const x87: RegisterBitSet = blk: {
var set = RegisterBitSet.initEmpty();
for (allocatable_regs, 0..) |reg, index| if (reg.class() == .x87) set.set(index);

View File

@ -529,12 +529,36 @@ pub const Register = enum(u8) {
16 => reg.to16(),
32 => reg.to32(),
64 => reg.to64(),
80 => reg.to80(),
128 => reg.to128(),
256 => reg.to256(),
512 => reg.to512(),
else => unreachable,
};
}
/// Returns the alias of `reg` matching the given operand `size`.
/// `.ptr` resolves via the target's pointer bit width; `.gpr` picks the
/// full-width general-purpose alias for the target arch (x86 -> 32-bit,
/// x86_64 -> 64-bit). `.none` and non-x86 arches for `.gpr` are invariant
/// violations (`unreachable`).
pub fn toSize(reg: Register, size: Memory.Size, target: *const std.Target) Register {
return switch (size) {
.none => unreachable,
.ptr => reg.toBitSize(target.ptrBitWidth()),
.gpr => switch (target.cpu.arch) {
else => unreachable,
.x86 => reg.to32(),
.x86_64 => reg.to64(),
},
.low_byte => reg.toLo8(),
.high_byte => reg.toHi8(),
.byte => reg.to8(),
.word => reg.to16(),
.dword => reg.to32(),
.qword => reg.to64(),
.tbyte => reg.to80(),
.xword => reg.to128(),
.yword => reg.to256(),
.zword => reg.to512(),
};
}
fn gpBase(reg: Register) u7 {
return switch (@intFromEnum(reg)) {
// zig fmt: off
@ -549,24 +573,62 @@ pub const Register = enum(u8) {
}
pub fn to64(reg: Register) Register {
return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax));
return switch (reg.class()) {
.general_purpose, .gphi => @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax)),
.segment => unreachable,
.x87, .mmx, .cr, .dr => reg,
.sse => reg.to128(),
.ip => .rip,
};
}
pub fn to32(reg: Register) Register {
return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax));
return switch (reg.class()) {
.general_purpose, .gphi => @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax)),
.segment => unreachable,
.x87, .mmx, .cr, .dr => reg,
.sse => reg.to128(),
.ip => .eip,
};
}
pub fn to16(reg: Register) Register {
return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax));
return switch (reg.class()) {
.general_purpose, .gphi => @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax)),
.segment, .x87, .mmx, .cr, .dr => reg,
.sse => reg.to128(),
.ip => .ip,
};
}
pub fn to8(reg: Register) Register {
return switch (@intFromEnum(reg)) {
else => @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al)),
@intFromEnum(Register.ah)...@intFromEnum(Register.bh) => reg,
return switch (reg.class()) {
.general_purpose => reg.toLo8(),
.gphi, .segment, .x87, .mmx, .cr, .dr => reg,
.sse => reg.to128(),
.ip => .ip,
};
}
/// Returns the low-byte alias of a general-purpose register
/// (e.g. rax/eax/ax -> al) by rebasing its enum value onto `al`.
pub fn toLo8(reg: Register) Register {
return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al));
}
/// Returns the high-byte alias of `reg` (e.g. rax/eax/ax -> ah) by
/// rebasing its enum value onto `ah`. Asserts the register actually has
/// a high-byte alias (`hasHi8`), i.e. is one of a/c/d/b.
pub fn toHi8(reg: Register) Register {
assert(reg.hasHi8());
return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ah));
}
/// Reports whether `reg` has a legacy high-byte alias, i.e. its id falls
/// in the contiguous ah..bh range.
pub fn hasHi8(reg: Register) bool {
return switch (reg.id()) {
comptime Register.ah.id()...comptime Register.bh.id() => true,
else => false,
};
}
/// Returns the 80-bit alias of `reg`. Only x87 registers are 80 bits
/// wide and they have no narrower aliases, so this asserts the class
/// and returns the register unchanged.
pub fn to80(reg: Register) Register {
assert(reg.class() == .x87);
return reg;
}
fn sseBase(reg: Register) u8 {
assert(reg.class() == .sse);
return switch (@intFromEnum(reg)) {
@ -577,6 +639,10 @@ pub const Register = enum(u8) {
};
}
/// Returns the 512-bit (zmm) alias of an SSE-class register by rebasing
/// its enum value onto `zmm0` (`sseBase` asserts the class).
pub fn to512(reg: Register) Register {
return @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.zmm0));
}
/// Returns the 256-bit (ymm) alias of an SSE-class register by rebasing
/// its enum value onto `ymm0` (`sseBase` asserts the class).
pub fn to256(reg: Register) Register {
return @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0));
}
@ -710,6 +776,8 @@ pub const Memory = struct {
none,
ptr,
gpr,
low_byte,
high_byte,
byte,
word,
dword,
@ -755,7 +823,7 @@ pub const Memory = struct {
.x86 => 32,
.x86_64 => 64,
},
.byte => 8,
.low_byte, .high_byte, .byte => 8,
.word => 16,
.dword => 32,
.qword => 64,

View File

@ -2330,8 +2330,8 @@
.{ .pext, .rvm, .{ .r32, .r32, .rm32 }, .{ 0xf3, 0x0f, 0x38, 0xf5 }, 0, .vex_lz_w0, .bmi2 },
.{ .pext, .rvm, .{ .r64, .r64, .rm64 }, .{ 0xf3, 0x0f, 0x38, 0xf5 }, 0, .vex_lz_w1, .bmi2 },
.{ .rorx, .rmi, .{ .r32, .rm32, .imm8 }, .{ 0xf2, 0x0f, 0x3a }, 0, .vex_lz_w0, .bmi2 },
.{ .rorx, .rmi, .{ .r64, .rm64, .imm8 }, .{ 0xf2, 0x0f, 0x3a }, 0, .vex_lz_w1, .bmi2 },
.{ .rorx, .rmi, .{ .r32, .rm32, .imm8 }, .{ 0xf2, 0x0f, 0x3a, 0xf0 }, 0, .vex_lz_w0, .bmi2 },
.{ .rorx, .rmi, .{ .r64, .rm64, .imm8 }, .{ 0xf2, 0x0f, 0x3a, 0xf0 }, 0, .vex_lz_w1, .bmi2 },
.{ .sarx, .rmv, .{ .r32, .rm32, .r32 }, .{ 0xf3, 0x0f, 0x38, 0xf7 }, 0, .vex_lz_w0, .bmi2 },
.{ .shlx, .rmv, .{ .r32, .rm32, .r32 }, .{ 0x66, 0x0f, 0x38, 0xf7 }, 0, .vex_lz_w0, .bmi2 },

View File

@ -1443,6 +1443,21 @@ pub const Pool = struct {
return pool.fromFields(allocator, .@"struct", &fields, kind);
},
.vector_8_i8_type => {
const vector_ctype = try pool.getVector(allocator, .{
.elem_ctype = .i8,
.len = 8,
});
if (!kind.isParameter()) return vector_ctype;
var fields = [_]Info.Field{
.{
.name = .{ .index = .array },
.ctype = vector_ctype,
.alignas = AlignAs.fromAbiAlignment(Type.i8.abiAlignment(zcu)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
},
.vector_16_i8_type => {
const vector_ctype = try pool.getVector(allocator, .{
.elem_ctype = .i8,
@ -1563,6 +1578,21 @@ pub const Pool = struct {
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
},
.vector_4_i16_type => {
const vector_ctype = try pool.getVector(allocator, .{
.elem_ctype = .i16,
.len = 4,
});
if (!kind.isParameter()) return vector_ctype;
var fields = [_]Info.Field{
.{
.name = .{ .index = .array },
.ctype = vector_ctype,
.alignas = AlignAs.fromAbiAlignment(Type.i16.abiAlignment(zcu)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
},
.vector_8_i16_type => {
const vector_ctype = try pool.getVector(allocator, .{
.elem_ctype = .i16,
@ -1593,6 +1623,21 @@ pub const Pool = struct {
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
},
.vector_4_u16_type => {
const vector_ctype = try pool.getVector(allocator, .{
.elem_ctype = .u16,
.len = 4,
});
if (!kind.isParameter()) return vector_ctype;
var fields = [_]Info.Field{
.{
.name = .{ .index = .array },
.ctype = vector_ctype,
.alignas = AlignAs.fromAbiAlignment(Type.u16.abiAlignment(zcu)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
},
.vector_8_u16_type => {
const vector_ctype = try pool.getVector(allocator, .{
.elem_ctype = .u16,
@ -1743,6 +1788,21 @@ pub const Pool = struct {
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
},
.vector_2_u128_type => {
const vector_ctype = try pool.getVector(allocator, .{
.elem_ctype = .u128,
.len = 2,
});
if (!kind.isParameter()) return vector_ctype;
var fields = [_]Info.Field{
.{
.name = .{ .index = .array },
.ctype = vector_ctype,
.alignas = AlignAs.fromAbiAlignment(Type.u128.abiAlignment(zcu)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
},
.vector_4_f16_type => {
const vector_ctype = try pool.getVector(allocator, .{
.elem_ctype = .f16,

View File

@ -9023,19 +9023,25 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
const lhs_scalar_ty = lhs_ty.scalarType(zcu);
const lhs_bits = lhs_scalar_ty.bitSize(zcu);
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
const lhs_info = lhs_ty.intInfo(zcu);
const llvm_lhs_ty = try o.lowerType(lhs_ty);
const llvm_lhs_scalar_ty = llvm_lhs_ty.scalarType(&o.builder);
const rhs_ty = self.typeOf(bin_op.rhs);
const rhs_info = rhs_ty.intInfo(zcu);
assert(rhs_info.signedness == .unsigned);
const llvm_rhs_ty = try o.lowerType(rhs_ty);
const llvm_rhs_scalar_ty = llvm_rhs_ty.scalarType(&o.builder);
const result = try self.wip.callIntrinsic(
.normal,
.none,
if (lhs_scalar_ty.isSignedInt(zcu)) .@"sshl.sat" else .@"ushl.sat",
switch (lhs_info.signedness) {
.signed => .@"sshl.sat",
.unsigned => .@"ushl.sat",
},
&.{llvm_lhs_ty},
&.{ lhs, casted_rhs },
&.{ lhs, try self.wip.conv(.unsigned, rhs, llvm_lhs_ty, "") },
"",
);
@ -9044,16 +9050,45 @@ pub const FuncGen = struct {
// poison value."
// However Zig semantics says that saturating shift left can never produce
// undefined; instead it saturates.
if (rhs_info.bits <= math.log2_int(u16, lhs_info.bits)) return result;
const bits = try o.builder.splatValue(
llvm_lhs_ty,
try o.builder.intConst(llvm_lhs_scalar_ty, lhs_bits),
llvm_rhs_ty,
try o.builder.intConst(llvm_rhs_scalar_ty, lhs_info.bits),
);
const lhs_max = try o.builder.splatValue(
llvm_lhs_ty,
try o.builder.intConst(llvm_lhs_scalar_ty, -1),
);
const in_range = try self.wip.icmp(.ult, casted_rhs, bits, "");
return self.wip.select(.normal, in_range, result, lhs_max, "");
const in_range = try self.wip.icmp(.ult, rhs, bits, "");
const lhs_sat = lhs_sat: switch (lhs_info.signedness) {
.signed => {
const zero = try o.builder.splatValue(
llvm_lhs_ty,
try o.builder.intConst(llvm_lhs_scalar_ty, 0),
);
const smin = try o.builder.splatValue(
llvm_lhs_ty,
try minIntConst(&o.builder, lhs_ty, llvm_lhs_ty, zcu),
);
const smax = try o.builder.splatValue(
llvm_lhs_ty,
try maxIntConst(&o.builder, lhs_ty, llvm_lhs_ty, zcu),
);
const lhs_lt_zero = try self.wip.icmp(.slt, lhs, zero, "");
const slimit = try self.wip.select(.normal, lhs_lt_zero, smin, smax, "");
const lhs_eq_zero = try self.wip.icmp(.eq, lhs, zero, "");
break :lhs_sat try self.wip.select(.normal, lhs_eq_zero, zero, slimit, "");
},
.unsigned => {
const zero = try o.builder.splatValue(
llvm_lhs_ty,
try o.builder.intConst(llvm_lhs_scalar_ty, 0),
);
const umax = try o.builder.splatValue(
llvm_lhs_ty,
try o.builder.intConst(llvm_lhs_scalar_ty, -1),
);
const lhs_eq_zero = try self.wip.icmp(.eq, lhs, zero, "");
break :lhs_sat try self.wip.select(.normal, lhs_eq_zero, zero, umax, "");
},
};
return self.wip.select(.normal, in_range, result, lhs_sat, "");
}
fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value {

View File

@ -128,12 +128,12 @@ test "Saturating Shift Left where lhs is of a computed type" {
});
}
pub fn FixedPoint(comptime value_type: type) type {
pub fn FixedPoint(comptime ValueType: type) type {
return struct {
value: value_type,
value: ValueType,
exponent: ShiftType,
const ShiftType: type = getIntShiftType(value_type);
const ShiftType: type = getIntShiftType(ValueType);
pub fn shiftExponent(self: @This(), shift: ShiftType) @This() {
const shiftAbs = @abs(shift);
@ -199,8 +199,7 @@ test "Saturating Shift Left" {
try expectEqual(0xffffffffffffffffffffffffffffffff, S.shlSat(@as(u128, 0x0fffffffffffffff0fffffffffffffff), 5));
try expectEqual(-0x80000000000000000000000000000000, S.shlSat(@as(i128, -0x0fffffffffffffff0fffffffffffffff), 5));
// TODO
// try expectEqual(51146728248377216718956089012931236753385031969422887335676427626502090568823039920051095192592252455482604439493126109519019633529459266458258243583, S.shlSat(@as(i495, 0x2fe6bc5448c55ce18252e2c9d44777505dfe63ff249a8027a6626c7d8dd9893fd5731e51474727be556f757facb586a4e04bbc0148c6c7ad692302f46fbd), 0x31));
try expectEqual(51146728248377216718956089012931236753385031969422887335676427626502090568823039920051095192592252455482604439493126109519019633529459266458258243583, S.shlSat(@as(i495, 0x2fe6bc5448c55ce18252e2c9d44777505dfe63ff249a8027a6626c7d8dd9893fd5731e51474727be556f757facb586a4e04bbc0148c6c7ad692302f46fbd), 0x31));
try expectEqual(-57896044618658097711785492504343953926634992332820282019728792003956564819968, S.shlSat(@as(i256, -0x53d4148cee74ea43477a65b3daa7b8fdadcbf4508e793f4af113b8d8da5a7eb6), 0x91));
try expectEqual(170141183460469231731687303715884105727, S.shlSat(@as(i128, 0x2fe6bc5448c55ce18252e2c9d4477750), 0x31));
try expectEqual(0, S.shlSat(@as(i128, 0), 127));

View File

@ -6,7 +6,6 @@ var x: u8 = 1;
// This excludes builtin functions that return void or noreturn that cannot be tested.
test {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@ -57,10 +57,6 @@ test "exporting using namespace access" {
test "exporting comptime-known value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
(builtin.target.ofmt != .elf and
builtin.target.ofmt != .macho and
builtin.target.ofmt != .coff)) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

View File

@ -3,13 +3,13 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "memmove and memset intrinsics" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMemmoveMemset();
try comptime testMemmoveMemset();
@ -33,13 +33,13 @@ fn testMemmoveMemset() !void {
}
test "@memmove with both operands single-ptr-to-array, one is null-terminated" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMemmoveBothSinglePtrArrayOneIsNullTerminated();
try comptime testMemmoveBothSinglePtrArrayOneIsNullTerminated();
@ -79,13 +79,13 @@ fn testMemmoveBothSinglePtrArrayOneIsNullTerminated() !void {
}
test "@memmove dest many pointer" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMemmoveDestManyPtr();
try comptime testMemmoveDestManyPtr();
@ -123,13 +123,13 @@ fn testMemmoveDestManyPtr() !void {
}
test "@memmove slice" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMemmoveSlice();
try comptime testMemmoveSlice();

View File

@ -53,12 +53,12 @@ test "saturating add" {
test "saturating add 128bit" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -127,12 +127,12 @@ test "saturating subtraction" {
test "saturating subtraction 128bit" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -230,9 +230,10 @@ test "saturating multiplication <= 32 bits" {
try testSatMul(i32, 10, -12, -120);
}
// TODO: remove this test, integrate into general test
test "saturating mul i64, i128, wasm only" {
if (builtin.zig_backend != .stage2_wasm) return error.SkipZigTest;
test "saturating mul i64, i128" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testSatMul(i64, 0, maxInt(i64), 0);
try testSatMul(i64, 0, minInt(i64), 0);
@ -259,13 +260,13 @@ test "saturating mul i64, i128, wasm only" {
test "saturating multiplication" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/9660
@ -298,29 +299,34 @@ test "saturating multiplication" {
}
test "saturating shift-left" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
try testSatShl(i8, 1, 2, 4);
try testSatShl(i8, 127, 1, 127);
try testSatShl(i8, -128, 1, -128);
try testSatShl(i8, 1, u8, 2, 4);
try testSatShl(i8, 127, u8, 1, 127);
try testSatShl(i8, -128, u8, 1, -128);
// TODO: remove this check once #9668 is completed
if (!builtin.cpu.arch.isWasm()) {
// skip testing ints > 64 bits on wasm due to miscompilation / wasmtime ci error
try testSatShl(i128, maxInt(i128), 64, maxInt(i128));
try testSatShl(u128, maxInt(u128), 64, maxInt(u128));
try testSatShl(i128, maxInt(i128), u128, 64, maxInt(i128));
try testSatShl(u128, maxInt(u128), u128, 64, maxInt(u128));
}
try testSatShl(u8, 1, 2, 4);
try testSatShl(u8, 255, 1, 255);
try testSatShl(u8, 1, u8, 2, 4);
try testSatShl(u8, 255, u8, 1, 255);
try testSatShl(i8, -3, u4, 8, minInt(i8));
try testSatShl(i8, 0, u4, 8, 0);
try testSatShl(i8, 3, u4, 8, maxInt(i8));
try testSatShl(u8, 0, u4, 8, 0);
try testSatShl(u8, 3, u4, 8, maxInt(u8));
}
fn testSatShl(comptime T: type, lhs: T, rhs: T, expected: T) !void {
fn testSatShl(comptime Lhs: type, lhs: Lhs, comptime Rhs: type, rhs: Rhs, expected: Lhs) !void {
try expect((lhs <<| rhs) == expected);
var x = lhs;
@ -332,19 +338,37 @@ test "saturating shift-left" {
try S.doTheTest();
try comptime S.doTheTest();
try comptime S.testSatShl(comptime_int, 0, 0, 0);
try comptime S.testSatShl(comptime_int, 1, 2, 4);
try comptime S.testSatShl(comptime_int, 13, 150, 18554220005177478453757717602843436772975706112);
try comptime S.testSatShl(comptime_int, -582769, 180, -893090893854873184096635538665358532628308979495815656505344);
try comptime S.testSatShl(comptime_int, 0, comptime_int, 0, 0);
try comptime S.testSatShl(comptime_int, 1, comptime_int, 2, 4);
try comptime S.testSatShl(comptime_int, 13, comptime_int, 150, 18554220005177478453757717602843436772975706112);
try comptime S.testSatShl(comptime_int, -582769, comptime_int, 180, -893090893854873184096635538665358532628308979495815656505344);
}
test "saturating shift-left large rhs" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
{
var lhs: u8 = undefined;
lhs = 1;
const ct_rhs: u1024 = 1 << 1023;
var rt_rhs: u1024 = undefined;
rt_rhs = ct_rhs;
try expect(lhs <<| ct_rhs == maxInt(u8));
try expect(lhs <<| rt_rhs == maxInt(u8));
}
}
test "saturating shl uses the LHS type" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const lhs_const: u8 = 1;
var lhs_var: u8 = 1;

View File

@ -31,8 +31,7 @@ test "vector wrap operators" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and
!comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -350,12 +349,12 @@ test "vector casts of sizes not divisible by 8" {
}
test "vector @splat" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and
builtin.os.tag == .macos)
@ -930,12 +929,12 @@ test "mask parameter of @shuffle is comptime scope" {
test "saturating add" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -965,12 +964,12 @@ test "saturating add" {
test "saturating subtraction" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1392,7 +1391,6 @@ test "store packed vector element" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
@ -1511,9 +1509,6 @@ test "boolean vector with 2 or more booleans" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// TODO: try removing this after <https://github.com/ziglang/zig/issues/13782>:
if (!(builtin.os.tag == .linux and builtin.cpu.arch == .x86_64)) return;
const vec1 = @Vector(2, bool){ true, true };
_ = vec1;

View File

@ -6,6 +6,7 @@ const DoubleBits = math.DoubleBits;
const fmax = math.fmax;
const fmin = math.fmin;
const Gpr = math.Gpr;
const imax = math.imax;
const inf = math.inf;
const Log2Int = math.Log2Int;
const math = @import("math.zig");
@ -2615,263 +2616,23 @@ fn binary(comptime op: anytype, comptime opts: struct { compare: Compare = .rela
0x1b, 0x61, 0x73, 0x63, 0x2c, 0x35, 0x25, 0x19, 0x09, 0x0c, 0x75, 0x5d, 0x01, 0x29, 0x3b, 0x0c,
});
try testArgs(@Vector(128, u7), .{
0x5c,
0x65,
0x65,
0x34,
0x31,
0x03,
0x7a,
0x56,
0x16,
0x74,
0x5c,
0x7f,
0x2a,
0x46,
0x2a,
0x5f,
0x62,
0x06,
0x51,
0x23,
0x58,
0x1f,
0x5a,
0x2d,
0x29,
0x21,
0x26,
0x5a,
0x5a,
0x13,
0x13,
0x46,
0x26,
0x1c,
0x06,
0x2d,
0x08,
0x52,
0x5b,
0x6f,
0x2d,
0x4a,
0x00,
0x40,
0x68,
0x27,
0x00,
0x4a,
0x3a,
0x22,
0x2d,
0x5b,
0x05,
0x26,
0x4e,
0x6f,
0x46,
0x4d,
0x14,
0x70,
0x51,
0x04,
0x66,
0x13,
0x4c,
0x7c,
0x67,
0x23,
0x13,
0x55,
0x1b,
0x30,
0x7d,
0x04,
0x47,
0x78,
0x05,
0x09,
0x5a,
0x20,
0x2e,
0x17,
0x11,
0x49,
0x6c,
0x5e,
0x34,
0x3e,
0x66,
0x60,
0x5d,
0x75,
0x48,
0x1d,
0x69,
0x67,
0x40,
0x2d,
0x7b,
0x31,
0x13,
0x60,
0x19,
0x2f,
0x3e,
0x7d,
0x23,
0x6a,
0x0e,
0x16,
0x44,
0x34,
0x5d,
0x5a,
0x2a,
0x0b,
0x64,
0x07,
0x22,
0x5b,
0x24,
0x22,
0x3b,
0x46,
0x23,
0x65,
0x5d,
0x34,
0x5c, 0x65, 0x65, 0x34, 0x31, 0x03, 0x7a, 0x56, 0x16, 0x74, 0x5c, 0x7f, 0x2a, 0x46, 0x2a, 0x5f,
0x62, 0x06, 0x51, 0x23, 0x58, 0x1f, 0x5a, 0x2d, 0x29, 0x21, 0x26, 0x5a, 0x5a, 0x13, 0x13, 0x46,
0x26, 0x1c, 0x06, 0x2d, 0x08, 0x52, 0x5b, 0x6f, 0x2d, 0x4a, 0x00, 0x40, 0x68, 0x27, 0x00, 0x4a,
0x3a, 0x22, 0x2d, 0x5b, 0x05, 0x26, 0x4e, 0x6f, 0x46, 0x4d, 0x14, 0x70, 0x51, 0x04, 0x66, 0x13,
0x4c, 0x7c, 0x67, 0x23, 0x13, 0x55, 0x1b, 0x30, 0x7d, 0x04, 0x47, 0x78, 0x05, 0x09, 0x5a, 0x20,
0x2e, 0x17, 0x11, 0x49, 0x6c, 0x5e, 0x34, 0x3e, 0x66, 0x60, 0x5d, 0x75, 0x48, 0x1d, 0x69, 0x67,
0x40, 0x2d, 0x7b, 0x31, 0x13, 0x60, 0x19, 0x2f, 0x3e, 0x7d, 0x23, 0x6a, 0x0e, 0x16, 0x44, 0x34,
0x5d, 0x5a, 0x2a, 0x0b, 0x64, 0x07, 0x22, 0x5b, 0x24, 0x22, 0x3b, 0x46, 0x23, 0x65, 0x5d, 0x34,
}, .{
0x4b,
0x36,
0x7a,
0x13,
0x5a,
0x4b,
0x69,
0x4b,
0x1d,
0x02,
0x1b,
0x3f,
0x61,
0x21,
0x45,
0x48,
0x44,
0x61,
0x25,
0x42,
0x57,
0x7d,
0x7a,
0x45,
0x22,
0x2e,
0x44,
0x3f,
0x3a,
0x14,
0x07,
0x6e,
0x68,
0x51,
0x03,
0x6b,
0x11,
0x32,
0x6d,
0x6f,
0x44,
0x5a,
0x61,
0x6d,
0x71,
0x66,
0x54,
0x14,
0x5d,
0x56,
0x22,
0x5c,
0x3a,
0x72,
0x16,
0x39,
0x59,
0x3e,
0x27,
0x4d,
0x3d,
0x44,
0x72,
0x2c,
0x71,
0x74,
0x3b,
0x6c,
0x70,
0x39,
0x0f,
0x5c,
0x71,
0x04,
0x67,
0x02,
0x2c,
0x18,
0x0f,
0x14,
0x2d,
0x24,
0x51,
0x34,
0x6d,
0x0c,
0x19,
0x0f,
0x73,
0x79,
0x3d,
0x74,
0x20,
0x15,
0x22,
0x25,
0x09,
0x14,
0x09,
0x71,
0x2d,
0x6f,
0x09,
0x2e,
0x27,
0x75,
0x57,
0x62,
0x4d,
0x07,
0x62,
0x01,
0x41,
0x2d,
0x5d,
0x4c,
0x77,
0x10,
0x7f,
0x30,
0x0f,
0x50,
0x15,
0x39,
0x34,
0x7c,
0x33,
0x16,
0x4b, 0x36, 0x7a, 0x13, 0x5a, 0x4b, 0x69, 0x4b, 0x1d, 0x02, 0x1b, 0x3f, 0x61, 0x21, 0x45, 0x48,
0x44, 0x61, 0x25, 0x42, 0x57, 0x7d, 0x7a, 0x45, 0x22, 0x2e, 0x44, 0x3f, 0x3a, 0x14, 0x07, 0x6e,
0x68, 0x51, 0x03, 0x6b, 0x11, 0x32, 0x6d, 0x6f, 0x44, 0x5a, 0x61, 0x6d, 0x71, 0x66, 0x54, 0x14,
0x5d, 0x56, 0x22, 0x5c, 0x3a, 0x72, 0x16, 0x39, 0x59, 0x3e, 0x27, 0x4d, 0x3d, 0x44, 0x72, 0x2c,
0x71, 0x74, 0x3b, 0x6c, 0x70, 0x39, 0x0f, 0x5c, 0x71, 0x04, 0x67, 0x02, 0x2c, 0x18, 0x0f, 0x14,
0x2d, 0x24, 0x51, 0x34, 0x6d, 0x0c, 0x19, 0x0f, 0x73, 0x79, 0x3d, 0x74, 0x20, 0x15, 0x22, 0x25,
0x09, 0x14, 0x09, 0x71, 0x2d, 0x6f, 0x09, 0x2e, 0x27, 0x75, 0x57, 0x62, 0x4d, 0x07, 0x62, 0x01,
0x41, 0x2d, 0x5d, 0x4c, 0x77, 0x10, 0x7f, 0x30, 0x0f, 0x50, 0x15, 0x39, 0x34, 0x7c, 0x33, 0x16,
});
try testArgs(@Vector(1, i8), .{
@ -5282,6 +5043,15 @@ test addWrap {
try test_add_wrap.testIntVectors();
}
inline fn addSat(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs +| rhs;
}
test addSat {
const test_add_sat = binary(addSat, .{});
try test_add_sat.testInts();
try test_add_sat.testIntVectors();
}
inline fn subUnsafe(comptime Type: type, lhs: Type, rhs: Type) AddOneBit(Type) {
@setRuntimeSafety(false);
return switch (@typeInfo(Scalar(Type))) {
@ -5328,6 +5098,15 @@ test subWrap {
try test_sub_wrap.testIntVectors();
}
inline fn subSat(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs -| rhs;
}
test subSat {
const test_sub_sat = binary(subSat, .{});
try test_sub_sat.testInts();
try test_sub_sat.testIntVectors();
}
inline fn mulUnsafe(comptime Type: type, lhs: Type, rhs: Type) DoubleBits(Type) {
@setRuntimeSafety(false);
return @as(DoubleBits(Type), lhs) * rhs;
@ -5356,6 +5135,14 @@ test mulWrap {
try test_mul_wrap.testIntVectors();
}
inline fn mulSat(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs *| rhs;
}
test mulSat {
const test_mul_sat = binary(mulSat, .{});
try test_mul_sat.testInts();
}
inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs * rhs) {
return lhs * rhs;
}
@ -5477,6 +5264,16 @@ test mulWithOverflow {
try test_mul_with_overflow.testInts();
}
inline fn shlWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } {
const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
return @shlWithOverflow(lhs, if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs);
}
test shlWithOverflow {
const test_shl_with_overflow = binary(shlWithOverflow, .{});
try test_shl_with_overflow.testInts();
}
inline fn equal(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs == rhs) {
return lhs == rhs;
}
@ -5592,6 +5389,22 @@ test shlExactUnsafe {
try test_shl_exact_unsafe.testInts();
}
inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
// workaround https://github.com/ziglang/zig/issues/23034
if (@inComptime()) {
// workaround https://github.com/ziglang/zig/issues/23139
//return lhs <<| @min(@abs(rhs), imax(u64));
return lhs <<| @min(@abs(rhs), @as(u64, imax(u64)));
}
// workaround https://github.com/ziglang/zig/issues/23033
@setRuntimeSafety(false);
return lhs <<| @abs(rhs);
}
test shlSat {
const test_shl_sat = binary(shlSat, .{});
try test_shl_sat.testInts();
}
inline fn bitXor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs ^ rhs) {
return lhs ^ rhs;
}

View File

@ -1,3 +1,7 @@
const math = @import("math.zig");
const imax = math.imax;
const imin = math.imin;
fn accessSlice(comptime array: anytype) !void {
var slice: []const @typeInfo(@TypeOf(array)).array.child = undefined;
slice = &array;
@ -38,13 +42,33 @@ test accessSlice {
fn accessVector(comptime init: anytype) !void {
const Vector = @TypeOf(init);
const Elem = @typeInfo(Vector).vector.child;
const ct_vals: [2]Elem = switch (Elem) {
bool => .{ false, true },
else => .{ imin(Elem), imax(Elem) },
};
var rt_vals: [2]Elem = undefined;
rt_vals = ct_vals;
var vector: Vector = undefined;
vector = init;
inline for (0..@typeInfo(Vector).vector.len) |ct_index| {
var rt_index: usize = undefined;
rt_index = ct_index;
if (&vector[rt_index] != &vector[ct_index]) return error.Unexpected;
if (vector[rt_index] != vector[ct_index]) return error.Unexpected;
if (vector[rt_index] != init[ct_index]) return error.Unexpected;
if (vector[ct_index] != init[ct_index]) return error.Unexpected;
vector[rt_index] = rt_vals[0];
if (vector[rt_index] != ct_vals[0]) return error.Unexpected;
if (vector[ct_index] != ct_vals[0]) return error.Unexpected;
vector[rt_index] = ct_vals[1];
if (vector[rt_index] != ct_vals[1]) return error.Unexpected;
if (vector[ct_index] != ct_vals[1]) return error.Unexpected;
vector[ct_index] = ct_vals[0];
if (vector[rt_index] != ct_vals[0]) return error.Unexpected;
if (vector[ct_index] != ct_vals[0]) return error.Unexpected;
vector[ct_index] = rt_vals[1];
if (vector[rt_index] != ct_vals[1]) return error.Unexpected;
if (vector[ct_index] != ct_vals[1]) return error.Unexpected;
}
}
test accessVector {

View File

@ -1818,3 +1818,12 @@ test optionalNotEqualNull {
try test_optional_not_equal_null.testInts();
try test_optional_not_equal_null.testFloats();
}
inline fn splat(comptime Type: type, lhs: Type) Type {
return @splat(lhs[0]);
}
test splat {
const test_splat = unary(splat, .{});
try test_splat.testIntVectors();
try test_splat.testFloatVectors();
}

View File

@ -117,9 +117,9 @@ export fn testMutablePointer() void {
// tmp.zig:37:38: note: imported here
// neg_inf.zon:1:1: error: expected type '?u8'
// tmp.zig:57:28: note: imported here
// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_492'
// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_496'
// tmp.zig:62:39: note: imported here
// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_494'
// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_498'
// tmp.zig:67:44: note: imported here
// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_497'
// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_501'
// tmp.zig:72:50: note: imported here

View File

@ -15,6 +15,6 @@ pub export fn entry() void {
// error
//
// :7:25: error: unable to resolve comptime value
// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_466.C' must be comptime-known
// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_470.C' must be comptime-known
// :4:16: note: struct requires comptime because of this field
// :4:16: note: types are not available at runtime

View File

@ -16,5 +16,5 @@ pub export fn entry2() void {
//
// :3:6: error: no field or member function named 'copy' in '[]const u8'
// :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})'
// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_470'
// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_474'
// :12:6: note: struct declared here

View File

@ -6,6 +6,6 @@ export fn foo() void {
// error
//
// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_459'
// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_463'
// :3:16: note: struct declared here
// :1:11: note: struct declared here

View File

@ -44,9 +44,9 @@ comptime {
//
// :5:23: error: expected error union type, found 'comptime_int'
// :10:23: error: expected error union type, found '@TypeOf(.{})'
// :15:23: error: expected error union type, found 'tmp.test2__struct_496'
// :15:23: error: expected error union type, found 'tmp.test2__struct_500'
// :15:23: note: struct declared here
// :20:27: error: expected error union type, found 'tmp.test3__struct_498'
// :20:27: error: expected error union type, found 'tmp.test3__struct_502'
// :20:27: note: struct declared here
// :25:23: error: expected error union type, found 'struct { comptime *const [5:0]u8 = "hello" }'
// :31:13: error: expected error union type, found 'u32'

View File

@ -1,12 +0,0 @@
export fn a() void {
comptime {
var x = @as(i32, 1);
x <<|= @as(i32, -2);
}
}
// error
// backend=stage2
// target=native
//
// :4:16: error: shift by negative amount '-2'

View File

@ -0,0 +1,36 @@
export fn a() void {
_ = @as(i32, 1) <<| @as(i32, -1);
}
comptime {
var x: i32 = 1;
x <<|= @as(i32, -2);
}
export fn b() void {
_ = @Vector(1, i32){1} <<| @Vector(1, i32){-3};
}
comptime {
var x: @Vector(2, i32) = .{ 1, 2 };
x <<|= @Vector(2, i32){ 0, -4 };
}
export fn c(rhs: i32) void {
_ = @as(i32, 1) <<| rhs;
}
export fn d(rhs: @Vector(3, i32)) void {
_ = @Vector(3, i32){ 1, 2, 3 } <<| rhs;
}
// error
// backend=stage2
// target=native
//
// :2:25: error: shift by negative amount '-1'
// :7:12: error: shift by negative amount '-2'
// :11:47: error: shift by negative amount '-3' at index '0'
// :16:27: error: shift by negative amount '-4' at index '1'
// :20:25: error: shift by signed type 'i32'
// :24:40: error: shift by signed type '@Vector(3, i32)'

View File

@ -1,9 +0,0 @@
export fn a() void {
_ = @as(i32, 1) <<| @as(i32, -2);
}
// error
// backend=stage2
// target=native
//
// :2:25: error: shift by negative amount '-2'