CBE: implement vector operations

Also, bigint add and sub, which is all I was actually trying to do.
This commit is contained in:
Jacob Young 2023-03-05 00:01:15 -05:00
parent b2e9c0d0ff
commit c478c7609e
11 changed files with 835 additions and 505 deletions

660
lib/zig.h
View File

@ -612,12 +612,6 @@ static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
static inline void zig_vaddo_u32(uint8_t *ov, uint32_t *res, int n,
    const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing add: res[lane] = lhs[lane] + rhs[lane] truncated
       to `bits` bits; ov[lane] records whether that lane overflowed. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_addo_u32(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
@ -632,12 +626,6 @@ static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
static inline void zig_vaddo_i32(uint8_t *ov, int32_t *res, int n,
    const int32_t *lhs, const int32_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed add; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_addo_i32(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint64_t full_res;
@ -650,12 +638,6 @@ static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
static inline void zig_vaddo_u64(uint8_t *ov, uint64_t *res, int n,
    const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing add; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_addo_u64(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
@ -670,12 +652,6 @@ static inline bool zig_addo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
static inline void zig_vaddo_i64(uint8_t *ov, int64_t *res, int n,
    const int64_t *lhs, const int64_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed add; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_addo_i64(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint8_t full_res;
@ -690,12 +666,6 @@ static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
static inline void zig_vaddo_u8(uint8_t *ov, uint8_t *res, int n,
    const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing add; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_addo_u8(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
int8_t full_res;
@ -710,12 +680,6 @@ static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
static inline void zig_vaddo_i8(uint8_t *ov, int8_t *res, int n,
    const int8_t *lhs, const int8_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed add; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_addo_i8(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
uint16_t full_res;
@ -730,12 +694,6 @@ static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
static inline void zig_vaddo_u16(uint8_t *ov, uint16_t *res, int n,
    const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing add; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_addo_u16(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(add_overflow) || defined(zig_gnuc)
int16_t full_res;
@ -750,12 +708,6 @@ static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
static inline void zig_vaddo_i16(uint8_t *ov, int16_t *res, int n,
    const int16_t *lhs, const int16_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed add; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_addo_i16(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint32_t full_res;
@ -768,12 +720,6 @@ static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
static inline void zig_vsubo_u32(uint8_t *ov, uint32_t *res, int n,
    const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing subtract; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_subo_u32(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
@ -788,12 +734,6 @@ static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
static inline void zig_vsubo_i32(uint8_t *ov, int32_t *res, int n,
    const int32_t *lhs, const int32_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed subtract; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_subo_i32(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint64_t full_res;
@ -806,12 +746,6 @@ static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
static inline void zig_vsubo_u64(uint8_t *ov, uint64_t *res, int n,
    const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing subtract; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_subo_u64(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
@ -826,12 +760,6 @@ static inline bool zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
static inline void zig_vsubo_i64(uint8_t *ov, int64_t *res, int n,
    const int64_t *lhs, const int64_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed subtract; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_subo_i64(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint8_t full_res;
@ -846,12 +774,6 @@ static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
static inline void zig_vsubo_u8(uint8_t *ov, uint8_t *res, int n,
    const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing subtract; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_subo_u8(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
int8_t full_res;
@ -866,13 +788,6 @@ static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
static inline void zig_vsubo_i8(uint8_t *ov, int8_t *res, int n,
    const int8_t *lhs, const int8_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed subtract; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_subo_i8(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
uint16_t full_res;
@ -887,13 +802,6 @@ static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
static inline void zig_vsubo_u16(uint8_t *ov, uint16_t *res, int n,
    const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing subtract; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_subo_u16(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(sub_overflow) || defined(zig_gnuc)
int16_t full_res;
@ -908,12 +816,6 @@ static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
static inline void zig_vsubo_i16(uint8_t *ov, int16_t *res, int n,
    const int16_t *lhs, const int16_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed subtract; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_subo_i16(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint32_t full_res;
@ -926,12 +828,6 @@ static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8
#endif
}
static inline void zig_vmulo_u32(uint8_t *ov, uint32_t *res, int n,
    const uint32_t *lhs, const uint32_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing multiply; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_mulo_u32(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow);
static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
@ -946,12 +842,6 @@ static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
}
static inline void zig_vmulo_i32(uint8_t *ov, int32_t *res, int n,
    const int32_t *lhs, const int32_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed multiply; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_mulo_i32(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint64_t full_res;
@ -964,12 +854,6 @@ static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8
#endif
}
static inline void zig_vmulo_u64(uint8_t *ov, uint64_t *res, int n,
    const uint64_t *lhs, const uint64_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing multiply; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_mulo_u64(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow);
static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
@ -984,12 +868,6 @@ static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t
return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits);
}
static inline void zig_vmulo_i64(uint8_t *ov, int64_t *res, int n,
    const int64_t *lhs, const int64_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed multiply; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_mulo_i64(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint8_t full_res;
@ -1004,12 +882,6 @@ static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t b
#endif
}
static inline void zig_vmulo_u8(uint8_t *ov, uint8_t *res, int n,
    const uint8_t *lhs, const uint8_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing multiply; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_mulo_u8(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
int8_t full_res;
@ -1024,12 +896,6 @@ static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits
#endif
}
static inline void zig_vmulo_i8(uint8_t *ov, int8_t *res, int n,
    const int8_t *lhs, const int8_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed multiply; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_mulo_i8(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
uint16_t full_res;
@ -1044,12 +910,6 @@ static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8
#endif
}
static inline void zig_vmulo_u16(uint8_t *ov, uint16_t *res, int n,
    const uint16_t *lhs, const uint16_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing multiply; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_mulo_u16(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) {
#if zig_has_builtin(mul_overflow) || defined(zig_gnuc)
int16_t full_res;
@ -1064,12 +924,6 @@ static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
#endif
}
static inline void zig_vmulo_i16(uint8_t *ov, int16_t *res, int n,
    const int16_t *lhs, const int16_t *rhs, uint8_t bits)
{
    /* Lane-wise overflowing signed multiply; ov[lane] records per-lane overflow. */
    int lane = 0;
    while (lane < n) {
        ov[lane] = zig_mulo_i16(&res[lane], lhs[lane], rhs[lane], bits);
        ++lane;
    }
}
#define zig_int_builtins(w) \
static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
*res = zig_shlw_u##w(lhs, rhs, bits); \
@ -2090,6 +1944,446 @@ static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_sign
return 0;
}
/* Big-integer overflowing addition: res = lhs + rhs, where all three operands
 * are zig_int_bytes(bits)-byte buffers holding a `bits`-bit integer in native
 * endianness. The value is walked from least to most significant limb, using
 * the widest limb that still fits (128, 64, 32, 16, then 8 bits); `overflow`
 * carries the inter-limb carry forward. Returns true if the addition
 * overflowed the `bits`-bit result. */
static inline bool zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
uint8_t *res_bytes = res;
const uint8_t *lhs_bytes = lhs;
const uint8_t *rhs_bytes = rhs;
uint16_t byte_offset = 0;
uint16_t remaining_bytes = zig_int_bytes(bits);
/* Padding bits in the most significant limb (storage width minus `bits`). */
uint16_t top_bits = remaining_bytes * 8 - bits;
bool overflow = false;
#if zig_big_endian
/* Big-endian: the least significant limb sits at the end of the buffer, so
 * offsets are walked downward. */
byte_offset = remaining_bytes;
#endif
/* 128-bit limbs. Only the final (most significant) limb uses the reduced
 * width (full width minus top_bits), and only that final limb of a signed
 * integer is treated as signed; all lower limbs are full-width unsigned. */
while (remaining_bytes >= 128 / CHAR_BIT) {
uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 128 / CHAR_BIT;
#endif
if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
zig_i128 res_limb;
zig_i128 tmp_limb;
zig_i128 lhs_limb;
zig_i128 rhs_limb;
bool limb_overflow;
/* memcpy in/out of the byte buffers avoids unaligned and aliasing issues. */
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
/* Fold in the carry from the previous limb; the two partial overflow
 * flags are combined with XOR (at most one of the two additions can
 * overflow when the carry-in is 0 or 1). */
overflow = limb_overflow ^ zig_addo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
zig_u128 res_limb;
zig_u128 tmp_limb;
zig_u128 lhs_limb;
zig_u128 rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_addo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 128 / CHAR_BIT;
#if zig_little_endian
byte_offset += 128 / CHAR_BIT;
#endif
}
/* Same scheme with 64-bit limbs. */
while (remaining_bytes >= 64 / CHAR_BIT) {
uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 64 / CHAR_BIT;
#endif
if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
int64_t res_limb;
int64_t tmp_limb;
int64_t lhs_limb;
int64_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_addo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
uint64_t res_limb;
uint64_t tmp_limb;
uint64_t lhs_limb;
uint64_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_addo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 64 / CHAR_BIT;
#if zig_little_endian
byte_offset += 64 / CHAR_BIT;
#endif
}
/* Same scheme with 32-bit limbs. */
while (remaining_bytes >= 32 / CHAR_BIT) {
uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 32 / CHAR_BIT;
#endif
if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
int32_t res_limb;
int32_t tmp_limb;
int32_t lhs_limb;
int32_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_addo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
uint32_t res_limb;
uint32_t tmp_limb;
uint32_t lhs_limb;
uint32_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_addo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 32 / CHAR_BIT;
#if zig_little_endian
byte_offset += 32 / CHAR_BIT;
#endif
}
/* Same scheme with 16-bit limbs. */
while (remaining_bytes >= 16 / CHAR_BIT) {
uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 16 / CHAR_BIT;
#endif
if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
int16_t res_limb;
int16_t tmp_limb;
int16_t lhs_limb;
int16_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_addo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
uint16_t res_limb;
uint16_t tmp_limb;
uint16_t lhs_limb;
uint16_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_addo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 16 / CHAR_BIT;
#if zig_little_endian
byte_offset += 16 / CHAR_BIT;
#endif
}
/* Same scheme with 8-bit limbs (at most one iteration remains here). */
while (remaining_bytes >= 8 / CHAR_BIT) {
uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 8 / CHAR_BIT;
#endif
if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
int8_t res_limb;
int8_t tmp_limb;
int8_t lhs_limb;
int8_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_addo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
uint8_t res_limb;
uint8_t tmp_limb;
uint8_t lhs_limb;
uint8_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_addo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_addo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 8 / CHAR_BIT;
#if zig_little_endian
byte_offset += 8 / CHAR_BIT;
#endif
}
return overflow;
}
/* Big-integer overflowing subtraction: res = lhs - rhs, where all three
 * operands are zig_int_bytes(bits)-byte buffers holding a `bits`-bit integer
 * in native endianness. Mirrors zig_addo_big: limbs are processed from least
 * to most significant (128, 64, 32, 16, then 8 bits); `overflow` carries the
 * inter-limb borrow forward. Returns true if the subtraction overflowed the
 * `bits`-bit result. */
static inline bool zig_subo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
uint8_t *res_bytes = res;
const uint8_t *lhs_bytes = lhs;
const uint8_t *rhs_bytes = rhs;
uint16_t byte_offset = 0;
uint16_t remaining_bytes = zig_int_bytes(bits);
/* Padding bits in the most significant limb (storage width minus `bits`). */
uint16_t top_bits = remaining_bytes * 8 - bits;
bool overflow = false;
#if zig_big_endian
/* Big-endian: the least significant limb sits at the end of the buffer, so
 * offsets are walked downward. */
byte_offset = remaining_bytes;
#endif
/* 128-bit limbs. Only the final (most significant) limb uses the reduced
 * width, and only that final limb of a signed integer is treated as signed. */
while (remaining_bytes >= 128 / CHAR_BIT) {
uint16_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 128 / CHAR_BIT;
#endif
if (remaining_bytes == 128 / CHAR_BIT && is_signed) {
zig_i128 res_limb;
zig_i128 tmp_limb;
zig_i128 lhs_limb;
zig_i128 rhs_limb;
bool limb_overflow;
/* memcpy in/out of the byte buffers avoids unaligned and aliasing issues. */
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
/* Subtract the borrow from the previous limb; the two partial overflow
 * flags are combined with XOR (at most one of the two subtractions can
 * overflow when the borrow-in is 0 or 1). */
overflow = limb_overflow ^ zig_subo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
zig_u128 res_limb;
zig_u128 tmp_limb;
zig_u128 lhs_limb;
zig_u128 rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_subo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 128 / CHAR_BIT;
#if zig_little_endian
byte_offset += 128 / CHAR_BIT;
#endif
}
/* Same scheme with 64-bit limbs. */
while (remaining_bytes >= 64 / CHAR_BIT) {
uint16_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 64 / CHAR_BIT;
#endif
if (remaining_bytes == 64 / CHAR_BIT && is_signed) {
int64_t res_limb;
int64_t tmp_limb;
int64_t lhs_limb;
int64_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_subo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
uint64_t res_limb;
uint64_t tmp_limb;
uint64_t lhs_limb;
uint64_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_subo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 64 / CHAR_BIT;
#if zig_little_endian
byte_offset += 64 / CHAR_BIT;
#endif
}
/* Same scheme with 32-bit limbs. */
while (remaining_bytes >= 32 / CHAR_BIT) {
uint16_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 32 / CHAR_BIT;
#endif
if (remaining_bytes == 32 / CHAR_BIT && is_signed) {
int32_t res_limb;
int32_t tmp_limb;
int32_t lhs_limb;
int32_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_subo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
uint32_t res_limb;
uint32_t tmp_limb;
uint32_t lhs_limb;
uint32_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_subo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 32 / CHAR_BIT;
#if zig_little_endian
byte_offset += 32 / CHAR_BIT;
#endif
}
/* Same scheme with 16-bit limbs. */
while (remaining_bytes >= 16 / CHAR_BIT) {
uint16_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 16 / CHAR_BIT;
#endif
if (remaining_bytes == 16 / CHAR_BIT && is_signed) {
int16_t res_limb;
int16_t tmp_limb;
int16_t lhs_limb;
int16_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_subo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
uint16_t res_limb;
uint16_t tmp_limb;
uint16_t lhs_limb;
uint16_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_subo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 16 / CHAR_BIT;
#if zig_little_endian
byte_offset += 16 / CHAR_BIT;
#endif
}
/* Same scheme with 8-bit limbs (at most one iteration remains here). */
while (remaining_bytes >= 8 / CHAR_BIT) {
uint16_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0);
#if zig_big_endian
byte_offset -= 8 / CHAR_BIT;
#endif
if (remaining_bytes == 8 / CHAR_BIT && is_signed) {
int8_t res_limb;
int8_t tmp_limb;
int8_t lhs_limb;
int8_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_subo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
} else {
uint8_t res_limb;
uint8_t tmp_limb;
uint8_t lhs_limb;
uint8_t rhs_limb;
bool limb_overflow;
memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb));
memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb));
limb_overflow = zig_subo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits);
overflow = limb_overflow ^ zig_subo_u8(&res_limb, tmp_limb, overflow ? UINT8_C(1) : UINT8_C(0), limb_bits);
memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb));
}
remaining_bytes -= 8 / CHAR_BIT;
#if zig_little_endian
byte_offset += 8 / CHAR_BIT;
#endif
}
return overflow;
}
static inline void zig_addw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
    /* Wrapping big-int add: same as zig_addo_big with the overflow flag
       deliberately discarded. */
    bool discarded = zig_addo_big(res, lhs, rhs, is_signed, bits);
    (void)discarded;
}
static inline void zig_subw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) {
    /* Wrapping big-int subtract: same as zig_subo_big with the overflow flag
       deliberately discarded. */
    bool discarded = zig_subo_big(res, lhs, rhs, is_signed, bits);
    (void)discarded;
}
static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) {
const uint8_t *val_bytes = val;
uint16_t byte_offset = 0;
@ -3092,80 +3386,6 @@ zig_msvc_atomics_128op(u128, max)
#endif /* _MSC_VER && (_M_IX86 || _M_X64) */
/* ============================= Vector Support ============================= */
/* Generates zig_<operation>_vec: an element-wise big-int comparison over two
 * packed vectors of `len` elements, each zig_int_bytes(elem_bits) bytes wide.
 * Each result[index] is the boolean of `zig_cmp_big(...) operator 0`, where
 * zig_cmp_big returns a three-way comparison result. */
#define zig_cmp_vec(operation, operator) \
static inline void zig_##operation##_vec(bool *result, const void *lhs, const void *rhs, uint32_t len, bool is_signed, uint16_t elem_bits) { \
uint32_t index = 0; \
const uint8_t *lhs_ptr = lhs; \
const uint8_t *rhs_ptr = rhs; \
uint16_t elem_bytes = zig_int_bytes(elem_bits); \
\
while (index < len) { \
result[index] = zig_cmp_big(lhs_ptr, rhs_ptr, is_signed, elem_bits) operator 0; \
lhs_ptr += elem_bytes; \
rhs_ptr += elem_bytes; \
index += 1; \
} \
}
/* Instantiate the six standard comparisons. */
zig_cmp_vec(eq, ==)
zig_cmp_vec(ne, !=)
zig_cmp_vec(lt, < )
zig_cmp_vec(le, <=)
zig_cmp_vec(gt, > )
zig_cmp_vec(ge, >=)
static inline void zig_clz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
    /* Per-element count-leading-zeros over a packed big-int vector. Results
       for elements of <= 128 bits fit in a uint8_t slot; wider elements get a
       uint16_t slot. */
    const uint8_t *elem_ptr = val;
    uint16_t elem_bytes = zig_int_bytes(elem_bits);
    for (uint32_t i = 0; i < len; i++) {
        uint16_t count = zig_clz_big(elem_ptr, is_signed, elem_bits);
        if (elem_bits <= 128)
            ((uint8_t *)result)[i] = (uint8_t)count;
        else
            ((uint16_t *)result)[i] = count;
        elem_ptr += elem_bytes;
    }
}
static inline void zig_ctz_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
    /* Per-element count-trailing-zeros over a packed big-int vector. Results
       for elements of <= 128 bits fit in a uint8_t slot; wider elements get a
       uint16_t slot. */
    const uint8_t *elem_ptr = val;
    uint16_t elem_bytes = zig_int_bytes(elem_bits);
    for (uint32_t i = 0; i < len; i++) {
        uint16_t count = zig_ctz_big(elem_ptr, is_signed, elem_bits);
        if (elem_bits <= 128)
            ((uint8_t *)result)[i] = (uint8_t)count;
        else
            ((uint16_t *)result)[i] = count;
        elem_ptr += elem_bytes;
    }
}
static inline void zig_popcount_vec(void *result, const void *val, uint32_t len, bool is_signed, uint16_t elem_bits) {
    /* Per-element population count over a packed big-int vector. Results for
       elements of <= 128 bits fit in a uint8_t slot; wider elements get a
       uint16_t slot. */
    const uint8_t *elem_ptr = val;
    uint16_t elem_bytes = zig_int_bytes(elem_bits);
    for (uint32_t i = 0; i < len; i++) {
        uint16_t count = zig_popcount_big(elem_ptr, is_signed, elem_bits);
        if (elem_bits <= 128)
            ((uint8_t *)result)[i] = (uint8_t)count;
        else
            ((uint16_t *)result)[i] = count;
        elem_ptr += elem_bytes;
    }
}
/* ======================== Special Case Intrinsics ========================= */
#if (_MSC_VER && _M_X64) || defined(__x86_64__)

File diff suppressed because it is too large Load Diff

View File

@ -4213,7 +4213,7 @@ pub const Type = extern union {
};
}
pub fn shallowElemType(child_ty: Type) Type {
fn shallowElemType(child_ty: Type) Type {
return switch (child_ty.zigTypeTag()) {
.Array, .Vector => child_ty.childType(),
else => child_ty,

View File

@ -3319,7 +3319,7 @@ pub const Value = extern union {
}
}
fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
pub fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
switch (dest_ty.floatBits(target)) {
16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),

View File

@ -96,7 +96,6 @@ fn vector8() !void {
test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@ -115,7 +114,6 @@ fn vector16() !void {
test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@ -134,7 +132,6 @@ fn vector24() !void {
test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

View File

@ -62,7 +62,6 @@ fn vector8() !void {
test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@ -81,7 +80,6 @@ fn vector16() !void {
test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
@ -100,7 +98,6 @@ fn vector24() !void {
test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

View File

@ -598,7 +598,6 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
test "vector casts" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@ -141,7 +141,6 @@ fn testSqrt() !void {
test "@sqrt with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -234,7 +233,6 @@ fn testSin() !void {
test "@sin with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -275,7 +273,6 @@ fn testCos() !void {
test "@cos with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -315,7 +312,6 @@ fn testExp() !void {
test "@exp with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -355,7 +351,6 @@ fn testExp2() !void {
test "@exp2" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -409,7 +404,6 @@ test "@log with @vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
{
@ -447,7 +441,6 @@ test "@log2 with vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// https://github.com/ziglang/zig/issues/13681
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and
@ -491,7 +484,6 @@ test "@log10 with vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime try testLog10WithVectors();
try testLog10WithVectors();
@ -537,7 +529,6 @@ fn testFabs() !void {
test "@fabs with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -660,7 +651,6 @@ fn testFloor() !void {
test "@floor with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -754,7 +744,6 @@ fn testCeil() !void {
test "@ceil with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -848,7 +837,6 @@ fn testTrunc() !void {
test "@trunc with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

View File

@ -25,7 +25,6 @@ test "@max" {
test "@max on vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -75,7 +74,6 @@ test "@min for vectors" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {

View File

@ -100,7 +100,6 @@ fn vector16() !void {
}
test "vector f16" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -124,7 +123,6 @@ fn vector32() !void {
}
test "vector f32" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -148,7 +146,6 @@ fn vector64() !void {
}
test "vector f64" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -171,7 +168,6 @@ fn vector80() !void {
}
test "vector f80" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -195,7 +191,6 @@ fn vector128() !void {
}
test "vector f128" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@ -25,7 +25,6 @@ test "implicit cast vector to array - bool" {
test "vector wrap operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -116,7 +115,6 @@ test "vector float operators" {
test "vector bit operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -442,7 +440,6 @@ test "vector comparison operators" {
test "vector division operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -525,7 +522,6 @@ test "vector division operators" {
test "vector bitwise not operator" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -557,7 +553,6 @@ test "vector bitwise not operator" {
test "vector shift operators" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -651,7 +646,6 @@ test "vector shift operators" {
test "vector reduce operation" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -707,7 +701,7 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
if (builtin.target.cpu.arch != .aarch64) {
if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) {
try testReduce(.Min, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, -386));
try testReduce(.Min, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 9));
}
@ -725,7 +719,7 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
if (builtin.target.cpu.arch != .aarch64) {
if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) {
try testReduce(.Max, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, 1234567));
try testReduce(.Max, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 99999));
}
@ -773,14 +767,14 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
if (false) {
try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan);
try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan);
if (builtin.zig_backend != .stage2_llvm) {
try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, -1.9));
try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, -1.9));
try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, -1.9));
try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan);
try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan);
try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, 100.0));
try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, 100.0));
try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, 100.0));
}
try testReduce(.Mul, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan);
@ -831,7 +825,6 @@ test "mask parameter of @shuffle is comptime scope" {
test "saturating add" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -863,7 +856,6 @@ test "saturating add" {
test "saturating subtraction" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -886,7 +878,6 @@ test "saturating subtraction" {
test "saturating multiplication" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -913,7 +904,6 @@ test "saturating multiplication" {
test "saturating shift-left" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -1047,7 +1037,6 @@ test "@mulWithOverflow" {
}
test "@shlWithOverflow" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -1202,7 +1191,6 @@ test "zero multiplicand" {
test "@intCast to u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO