From 85be0b8c6589f58d5667d0a8a8f94524d5de5ba6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 26 Jan 2023 13:15:35 -0700 Subject: [PATCH] update C headers to LLVM 16 upstream commit 0604154e006e88e9e7f82d8ee5fd076bda206613 --- lib/include/__clang_cuda_texture_intrinsics.h | 2 + lib/include/__clang_hip_libdevice_declares.h | 5 + lib/include/__clang_hip_math.h | 23 +- lib/include/__clang_hip_runtime_wrapper.h | 1 + lib/include/__clang_hip_stdlib.h | 43 + lib/include/altivec.h | 28 +- lib/include/amxfp16intrin.h | 58 + lib/include/amxintrin.h | 32 + lib/include/arm_acle.h | 151 +- lib/include/arm_fp16.h | 2 +- lib/include/arm_neon.h | 35180 ++++++++-------- lib/include/arm_neon_sve_bridge.h | 182 + lib/include/arm_sve.h | 2040 +- lib/include/avx512bf16intrin.h | 33 +- lib/include/avx512fintrin.h | 4 +- lib/include/avx512fp16intrin.h | 15 +- lib/include/avx512ifmavlintrin.h | 40 +- lib/include/avx512vlbf16intrin.h | 69 +- lib/include/avx512vlbwintrin.h | 352 + lib/include/avx512vlfp16intrin.h | 3 + lib/include/avxifmaintrin.h | 177 + lib/include/avxintrin.h | 14 +- lib/include/avxneconvertintrin.h | 484 + lib/include/avxvnniint8intrin.h | 471 + lib/include/cmpccxaddintrin.h | 70 + lib/include/cpuid.h | 12 +- lib/include/cuda_wrappers/cmath | 90 + lib/include/emmintrin.h | 12 +- lib/include/float.h | 27 +- lib/include/gfniintrin.h | 12 +- lib/include/hlsl.h | 15 - lib/include/hlsl_basic_types.h | 64 - lib/include/hlsl_intrinsics.h | 15 - lib/include/immintrin.h | 48 +- lib/include/larchintrin.h | 234 + lib/include/limits.h | 5 +- lib/include/opencl-c-base.h | 19 + lib/include/opencl-c.h | 320 +- lib/include/ppc_wrappers/emmintrin.h | 4 +- lib/include/ppc_wrappers/mm_malloc.h | 2 +- lib/include/ppc_wrappers/mmintrin.h | 4 +- lib/include/ppc_wrappers/pmmintrin.h | 4 +- lib/include/ppc_wrappers/smmintrin.h | 4 +- lib/include/ppc_wrappers/tmmintrin.h | 4 +- lib/include/ppc_wrappers/xmmintrin.h | 4 +- lib/include/prfchiintrin.h | 61 + lib/include/raointintrin.h | 203 + lib/include/riscv_vector.h | 21 +- lib/include/smmintrin.h | 2 +- lib/include/stdarg.h | 30 +- lib/include/stdatomic.h | 9 +- lib/include/stdbool.h | 4 +- lib/include/stddef.h | 9 +- lib/include/stdint.h | 198 +- lib/include/stdnoreturn.h | 2 +- lib/include/unwind.h | 3 +- lib/include/velintrin.h | 2 +- lib/include/x86gprintrin.h | 26 +- lib/include/xmmintrin.h | 3 +- 59 files changed, 21633 insertions(+), 19318 deletions(-) create mode 100644 lib/include/__clang_hip_stdlib.h create mode 100644 lib/include/amxfp16intrin.h create mode 100644 lib/include/arm_neon_sve_bridge.h create mode 100644 lib/include/avxifmaintrin.h create mode 100644 lib/include/avxneconvertintrin.h create mode 100644 lib/include/avxvnniint8intrin.h create mode 100644 lib/include/cmpccxaddintrin.h create mode 100644 lib/include/cuda_wrappers/cmath delete mode 100644 lib/include/hlsl.h delete mode 100644 lib/include/hlsl_basic_types.h delete mode 100644 lib/include/hlsl_intrinsics.h create mode 100644 lib/include/larchintrin.h create mode 100644 lib/include/prfchiintrin.h create mode 100644 lib/include/raointintrin.h diff --git a/lib/include/__clang_cuda_texture_intrinsics.h b/lib/include/__clang_cuda_texture_intrinsics.h index 3c0f0026f1..a719522112 100644 --- a/lib/include/__clang_cuda_texture_intrinsics.h +++ b/lib/include/__clang_cuda_texture_intrinsics.h @@ -666,6 +666,7 @@ __device__ static void __tex_fetch(__T *__ptr, cudaTextureObject_t __handle, __tex_fetch_v4<__op>::template __run<__FetchT>(__handle, __args...)); } +#if CUDA_VERSION < 12000 // 
texture<> objects get magically converted into a texture reference. However, // there's no way to convert them to cudaTextureObject_t on C++ level. So, we // cheat a bit and use inline assembly to do it. It costs us an extra register @@ -713,6 +714,7 @@ __tex_fetch(__DataT *, __RetT *__ptr, __tex_fetch_v4<__op>::template __run<__FetchT>( __tex_handle_to_obj(__handle), __args...)); } +#endif // CUDA_VERSION } // namespace __cuda_tex } // namespace #pragma pop_macro("__ASM_OUT") diff --git a/lib/include/__clang_hip_libdevice_declares.h b/lib/include/__clang_hip_libdevice_declares.h index 8be848ba2a..be25f4b4a0 100644 --- a/lib/include/__clang_hip_libdevice_declares.h +++ b/lib/include/__clang_hip_libdevice_declares.h @@ -288,12 +288,17 @@ __llvm_amdgcn_rsq_f64(double __x) { __device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16); __device__ _Float16 __ocml_cos_f16(_Float16); +__device__ __attribute__((const)) _Float16 __ocml_cvtrtn_f16_f32(float); +__device__ __attribute__((const)) _Float16 __ocml_cvtrtp_f16_f32(float); +__device__ __attribute__((const)) _Float16 __ocml_cvtrtz_f16_f32(float); __device__ __attribute__((pure)) _Float16 __ocml_exp_f16(_Float16); __device__ __attribute__((pure)) _Float16 __ocml_exp10_f16(_Float16); __device__ __attribute__((pure)) _Float16 __ocml_exp2_f16(_Float16); __device__ __attribute__((const)) _Float16 __ocml_floor_f16(_Float16); __device__ __attribute__((const)) _Float16 __ocml_fma_f16(_Float16, _Float16, _Float16); +__device__ __attribute__((const)) _Float16 __ocml_fmax_f16(_Float16, _Float16); +__device__ __attribute__((const)) _Float16 __ocml_fmin_f16(_Float16, _Float16); __device__ __attribute__((const)) _Float16 __ocml_fabs_f16(_Float16); __device__ __attribute__((const)) int __ocml_isinf_f16(_Float16); __device__ __attribute__((const)) int __ocml_isnan_f16(_Float16); diff --git a/lib/include/__clang_hip_math.h b/lib/include/__clang_hip_math.h index ef7e087b83..537dd0fca8 100644 --- a/lib/include/__clang_hip_math.h +++ b/lib/include/__clang_hip_math.h @@ -70,9 +70,9 @@ __DEVICE__ void __static_assert_equal_size() { #endif __DEVICE__ -uint64_t __make_mantissa_base8(const char *__tagp) { +uint64_t __make_mantissa_base8(const char *__tagp __attribute__((nonnull))) { uint64_t __r = 0; - while (__tagp) { + while (*__tagp != '\0') { char __tmp = *__tagp; if (__tmp >= '0' && __tmp <= '7') @@ -87,9 +87,9 @@ uint64_t __make_mantissa_base8(const char *__tagp) { } __DEVICE__ -uint64_t __make_mantissa_base10(const char *__tagp) { +uint64_t __make_mantissa_base10(const char *__tagp __attribute__((nonnull))) { uint64_t __r = 0; - while (__tagp) { + while (*__tagp != '\0') { char __tmp = *__tagp; if (__tmp >= '0' && __tmp <= '9') @@ -104,9 +104,9 @@ uint64_t __make_mantissa_base10(const char *__tagp) { } __DEVICE__ -uint64_t __make_mantissa_base16(const char *__tagp) { +uint64_t __make_mantissa_base16(const char *__tagp __attribute__((nonnull))) { uint64_t __r = 0; - while (__tagp) { + while (*__tagp != '\0') { char __tmp = *__tagp; if (__tmp >= '0' && __tmp <= '9') @@ -125,10 +125,7 @@ uint64_t __make_mantissa_base16(const char *__tagp) { } __DEVICE__ -uint64_t __make_mantissa(const char *__tagp) { - if (!__tagp) - return 0u; - +uint64_t __make_mantissa(const char *__tagp __attribute__((nonnull))) { if (*__tagp == '0') { ++__tagp; @@ -233,7 +230,7 @@ __DEVICE__ float expm1f(float __x) { return __ocml_expm1_f32(__x); } __DEVICE__ -float fabsf(float __x) { return __ocml_fabs_f32(__x); } +float fabsf(float __x) { return __builtin_fabsf(__x); } __DEVICE__ 
float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); } @@ -359,7 +356,7 @@ float modff(float __x, float *__iptr) { } __DEVICE__ -float nanf(const char *__tagp) { +float nanf(const char *__tagp __attribute__((nonnull))) { union { float val; struct ieee_float { @@ -792,7 +789,7 @@ __DEVICE__ double expm1(double __x) { return __ocml_expm1_f64(__x); } __DEVICE__ -double fabs(double __x) { return __ocml_fabs_f64(__x); } +double fabs(double __x) { return __builtin_fabs(__x); } __DEVICE__ double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); } diff --git a/lib/include/__clang_hip_runtime_wrapper.h b/lib/include/__clang_hip_runtime_wrapper.h index 10cec58ed1..0508731de1 100644 --- a/lib/include/__clang_hip_runtime_wrapper.h +++ b/lib/include/__clang_hip_runtime_wrapper.h @@ -113,6 +113,7 @@ __attribute__((weak)) inline __device__ void free(void *__ptr) { #include <__clang_hip_libdevice_declares.h> #include <__clang_hip_math.h> +#include <__clang_hip_stdlib.h> #if defined(__HIPCC_RTC__) #include <__clang_hip_cmath.h> diff --git a/lib/include/__clang_hip_stdlib.h b/lib/include/__clang_hip_stdlib.h new file mode 100644 index 0000000000..bd770e2415 --- /dev/null +++ b/lib/include/__clang_hip_stdlib.h @@ -0,0 +1,43 @@ +/*===---- __clang_hip_stdlib.h - Device-side HIP math support --------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __CLANG_HIP_STDLIB_H__ + +#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__) +#error "This file is for HIP and OpenMP AMDGCN device compilation only." +#endif + +#if !defined(__cplusplus) + +#include + +#ifdef __OPENMP_AMDGCN__ +#define __DEVICE__ static inline __attribute__((always_inline, nothrow)) +#else +#define __DEVICE__ static __device__ inline __attribute__((always_inline)) +#endif + +__DEVICE__ +int abs(int __x) { + int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1); + return (__x ^ __sgn) - __sgn; +} +__DEVICE__ +long labs(long __x) { + long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1); + return (__x ^ __sgn) - __sgn; +} +__DEVICE__ +long long llabs(long long __x) { + long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1); + return (__x ^ __sgn) - __sgn; +} + +#endif // !defined(__cplusplus) + +#endif // #define __CLANG_HIP_STDLIB_H__ diff --git a/lib/include/altivec.h b/lib/include/altivec.h index 0b1e76e81c..f50466ec96 100644 --- a/lib/include/altivec.h +++ b/lib/include/altivec.h @@ -17323,32 +17323,32 @@ provided. 
#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast #ifdef __VSX__ -static __inline__ vector unsigned long long __attribute__((__always_inline__)) -__builtin_crypto_vsbox(vector unsigned long long __a) { +static __inline__ vector unsigned char __attribute__((__always_inline__)) +__builtin_crypto_vsbox(vector unsigned char __a) { return __builtin_altivec_crypto_vsbox(__a); } -static __inline__ vector unsigned long long __attribute__((__always_inline__)) -__builtin_crypto_vcipher(vector unsigned long long __a, - vector unsigned long long __b) { +static __inline__ vector unsigned char __attribute__((__always_inline__)) +__builtin_crypto_vcipher(vector unsigned char __a, + vector unsigned char __b) { return __builtin_altivec_crypto_vcipher(__a, __b); } -static __inline__ vector unsigned long long __attribute__((__always_inline__)) -__builtin_crypto_vcipherlast(vector unsigned long long __a, - vector unsigned long long __b) { +static __inline__ vector unsigned char __attribute__((__always_inline__)) +__builtin_crypto_vcipherlast(vector unsigned char __a, + vector unsigned char __b) { return __builtin_altivec_crypto_vcipherlast(__a, __b); } -static __inline__ vector unsigned long long __attribute__((__always_inline__)) -__builtin_crypto_vncipher(vector unsigned long long __a, - vector unsigned long long __b) { +static __inline__ vector unsigned char __attribute__((__always_inline__)) +__builtin_crypto_vncipher(vector unsigned char __a, + vector unsigned char __b) { return __builtin_altivec_crypto_vncipher(__a, __b); } -static __inline__ vector unsigned long long __attribute__((__always_inline__)) -__builtin_crypto_vncipherlast(vector unsigned long long __a, - vector unsigned long long __b) { +static __inline__ vector unsigned char __attribute__((__always_inline__)) +__builtin_crypto_vncipherlast(vector unsigned char __a, + vector unsigned char __b) { return __builtin_altivec_crypto_vncipherlast(__a, __b); } #endif /* __VSX__ */ diff --git a/lib/include/amxfp16intrin.h b/lib/include/amxfp16intrin.h new file mode 100644 index 0000000000..ed798245d4 --- /dev/null +++ b/lib/include/amxfp16intrin.h @@ -0,0 +1,58 @@ +/*===------------- amxfp16intrin.h - AMX_FP16 intrinsics -*- C++ -*---------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===------------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; use instead." +#endif /* __IMMINTRIN_H */ + +#ifndef __AMX_FP16INTRIN_H +#define __AMX_FP16INTRIN_H +#ifdef __x86_64__ + +/// Compute dot-product of FP16 (16-bit) floating-point pairs in tiles \a a +/// and \a b, accumulating the intermediate single-precision (32-bit) +/// floating-point elements with elements in \a dst, and store the 32-bit +/// result back to tile \a dst. 
+/// +/// \headerfile +/// +/// \code +/// void _tile_dpfp16ps (__tile dst, __tile a, __tile b) +/// \endcode +/// +/// \code{.operation} +/// FOR m := 0 TO dst.rows - 1 +/// tmp := dst.row[m] +/// FOR k := 0 TO (a.colsb / 4) - 1 +/// FOR n := 0 TO (dst.colsb / 4) - 1 +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * +/// FP32(b.row[k].fp16[2*n+0]) +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * +/// FP32(b.row[k].fp16[2*n+1]) +/// ENDFOR +/// ENDFOR +/// write_row_and_zero(dst, m, tmp, dst.colsb) +/// ENDFOR +/// zero_upper_rows(dst, dst.rows) +/// zero_tileconfig_start() +/// \endcode +/// +/// This intrinsic corresponds to the \c TDPFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param a +/// The 1st source tile. Max size is 1024 Bytes. +/// \param b +/// The 2nd source tile. Max size is 1024 Bytes. +#define _tile_dpfp16ps(dst, a, b) \ + __builtin_ia32_tdpfp16ps(dst, a, b) + +#endif /* __x86_64__ */ +#endif /* __AMX_FP16INTRIN_H */ diff --git a/lib/include/amxintrin.h b/lib/include/amxintrin.h index ec67a87e39..baa56f5b28 100644 --- a/lib/include/amxintrin.h +++ b/lib/include/amxintrin.h @@ -22,6 +22,8 @@ __attribute__((__always_inline__, __nodebug__, __target__("amx-int8"))) #define __DEFAULT_FN_ATTRS_BF16 \ __attribute__((__always_inline__, __nodebug__, __target__("amx-bf16"))) +#define __DEFAULT_FN_ATTRS_FP16 \ + __attribute__((__always_inline__, __nodebug__, __target__("amx-fp16"))) /// Load tile configuration from a 64-byte memory location specified by /// "mem_addr". The tile configuration includes the tile type palette, the @@ -290,6 +292,13 @@ _tile_dpbf16ps_internal(unsigned short m, unsigned short n, unsigned short k, return __builtin_ia32_tdpbf16ps_internal(m, n, k, dst, src1, src2); } +/// This is internal intrinsic. C/C++ user should avoid calling it directly. +static __inline__ _tile1024i __DEFAULT_FN_ATTRS_FP16 +_tile_dpfp16ps_internal(unsigned short m, unsigned short n, unsigned short k, + _tile1024i dst, _tile1024i src1, _tile1024i src2) { + return __builtin_ia32_tdpfp16ps_internal(m, n, k, dst, src1, src2); +} + /// This struct pack the shape and tile data together for user. We suggest /// initializing the struct as early as possible, because compiler depends /// on the shape information to do configure. The constant value is preferred @@ -484,9 +493,32 @@ static __inline__ void __tile_dpbf16ps(__tile1024i *dst, __tile1024i src0, src0.tile, src1.tile); } +/// Compute dot-product of FP16 (16-bit) floating-point pairs in tiles src0 and +/// src1, accumulating the intermediate single-precision (32-bit) floating-point +/// elements with elements in "dst", and store the 32-bit result back to tile +/// "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. 
+__DEFAULT_FN_ATTRS_FP16 +static __inline__ void __tile_dpfp16ps(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_dpfp16ps_internal(src0.row, src1.col, src0.col, dst->tile, + src0.tile, src1.tile); +} + #undef __DEFAULT_FN_ATTRS_TILE #undef __DEFAULT_FN_ATTRS_INT8 #undef __DEFAULT_FN_ATTRS_BF16 +#undef __DEFAULT_FN_ATTRS_FP16 #endif /* __x86_64__ */ #endif /* __AMXINTRIN_H */ diff --git a/lib/include/arm_acle.h b/lib/include/arm_acle.h index 1cfc140327..e086f1f02d 100644 --- a/lib/include/arm_acle.h +++ b/lib/include/arm_acle.h @@ -64,7 +64,7 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(v } #endif -#if __ARM_32BIT_STATE +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE #define __dbg(t) __builtin_arm_dbg(t) #endif @@ -82,7 +82,7 @@ __swp(uint32_t __x, volatile uint32_t *__p) { /* 8.6.1 Data prefetch */ #define __pld(addr) __pldx(0, 0, 0, addr) -#if __ARM_32BIT_STATE +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE #define __pldx(access_kind, cache_level, retention_policy, addr) \ __builtin_arm_prefetch(addr, access_kind, 1) #else @@ -93,7 +93,7 @@ __swp(uint32_t __x, volatile uint32_t *__p) { /* 8.6.2 Instruction prefetch */ #define __pli(addr) __plix(0, 0, addr) -#if __ARM_32BIT_STATE +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE #define __plix(cache_level, retention_policy, addr) \ __builtin_arm_prefetch(addr, 0, 0) #else @@ -140,17 +140,17 @@ __rorl(unsigned long __x, uint32_t __y) { /* CLZ */ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __clz(uint32_t __t) { - return __builtin_clz(__t); + return (uint32_t)__builtin_clz(__t); } static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) __clzl(unsigned long __t) { - return __builtin_clzl(__t); + return (unsigned long)__builtin_clzl(__t); } static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) __clzll(uint64_t __t) { - return __builtin_clzll(__t); + return (uint64_t)__builtin_clzll(__t); } /* CLS */ @@ -201,7 +201,7 @@ __rev16(uint32_t __t) { static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) __rev16ll(uint64_t __t) { - return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t); + return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t); } static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) @@ -216,7 +216,7 @@ __rev16l(unsigned long __t) { /* REVSH */ static __inline__ int16_t __attribute__((__always_inline__, __nodebug__)) __revsh(int16_t __t) { - return __builtin_bswap16(__t); + return (int16_t)__builtin_bswap16((uint16_t)__t); } /* RBIT */ @@ -227,7 +227,7 @@ __rbit(uint32_t __t) { static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) __rbitll(uint64_t __t) { -#if __ARM_32BIT_STATE +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE return (((uint64_t)__builtin_arm_rbit(__t)) << 32) | __builtin_arm_rbit(__t >> 32); #else @@ -247,7 +247,7 @@ __rbitl(unsigned long __t) { /* * 9.3 16-bit multiplications */ -#if __ARM_FEATURE_DSP +#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) __smulbb(int32_t __a, int32_t __b) { return __builtin_arm_smulbb(__a, __b); @@ -277,17 +277,17 @@ __smulwt(int32_t __a, int32_t __b) { /* * 9.4 Saturating intrinsics * - * FIXME: Change guard to their corrosponding __ARM_FEATURE flag when Q flag + * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag * intrinsics are 
implemented and the flag is enabled. */ /* 9.4.1 Width-specified saturation intrinsics */ -#if __ARM_FEATURE_SAT +#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT #define __ssat(x, y) __builtin_arm_ssat(x, y) #define __usat(x, y) __builtin_arm_usat(x, y) #endif /* 9.4.2 Saturating addition and subtraction intrinsics */ -#if __ARM_FEATURE_DSP +#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) __qadd(int32_t __t, int32_t __v) { return __builtin_arm_qadd(__t, __v); @@ -305,7 +305,7 @@ __qdbl(int32_t __t) { #endif /* 9.4.3 Accumultating multiplications */ -#if __ARM_FEATURE_DSP +#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) __smlabb(int32_t __a, int32_t __b, int32_t __c) { return __builtin_arm_smlabb(__a, __b, __c); @@ -334,13 +334,13 @@ __smlawt(int32_t __a, int32_t __b, int32_t __c) { /* 9.5.4 Parallel 16-bit saturation */ -#if __ARM_FEATURE_SIMD32 +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 #define __ssat16(x, y) __builtin_arm_ssat16(x, y) #define __usat16(x, y) __builtin_arm_usat16(x, y) #endif /* 9.5.5 Packing and unpacking */ -#if __ARM_FEATURE_SIMD32 +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 typedef int32_t int8x4_t; typedef int32_t int16x2_t; typedef uint32_t uint8x4_t; @@ -365,7 +365,7 @@ __uxtb16(int8x4_t __a) { #endif /* 9.5.6 Parallel selection */ -#if __ARM_FEATURE_SIMD32 +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) __sel(uint8x4_t __a, uint8x4_t __b) { return __builtin_arm_sel(__a, __b); @@ -373,7 +373,7 @@ __sel(uint8x4_t __a, uint8x4_t __b) { #endif /* 9.5.7 Parallel 8-bit addition and subtraction */ -#if __ARM_FEATURE_SIMD32 +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) __qadd8(int8x4_t __a, int8x4_t __b) { return __builtin_arm_qadd8(__a, __b); @@ -425,7 +425,7 @@ __usub8(uint8x4_t __a, uint8x4_t __b) { #endif /* 9.5.8 Sum of 8-bit absolute differences */ -#if __ARM_FEATURE_SIMD32 +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __usad8(uint8x4_t __a, uint8x4_t __b) { return __builtin_arm_usad8(__a, __b); @@ -437,7 +437,7 @@ __usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) { #endif /* 9.5.9 Parallel 16-bit addition and subtraction */ -#if __ARM_FEATURE_SIMD32 +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) __qadd16(int16x2_t __a, int16x2_t __b) { return __builtin_arm_qadd16(__a, __b); @@ -537,7 +537,7 @@ __usub16(uint16x2_t __a, uint16x2_t __b) { #endif /* 9.5.10 Parallel 16-bit multiplications */ -#if __ARM_FEATURE_SIMD32 +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) __smlad(int16x2_t __a, int16x2_t __b, int32_t __c) { return __builtin_arm_smlad(__a, __b, __c); @@ -589,155 +589,156 @@ __smusdx(int16x2_t __a, int16x2_t __b) { #endif /* 9.7 CRC32 intrinsics */ -#if __ARM_FEATURE_CRC32 -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +#if (defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32) || \ + (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, 
target("crc"))) __crc32b(uint32_t __a, uint8_t __b) { return __builtin_arm_crc32b(__a, __b); } -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) __crc32h(uint32_t __a, uint16_t __b) { return __builtin_arm_crc32h(__a, __b); } -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) __crc32w(uint32_t __a, uint32_t __b) { return __builtin_arm_crc32w(__a, __b); } -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) __crc32d(uint32_t __a, uint64_t __b) { return __builtin_arm_crc32d(__a, __b); } -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) __crc32cb(uint32_t __a, uint8_t __b) { return __builtin_arm_crc32cb(__a, __b); } -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) __crc32ch(uint32_t __a, uint16_t __b) { return __builtin_arm_crc32ch(__a, __b); } -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) __crc32cw(uint32_t __a, uint32_t __b) { return __builtin_arm_crc32cw(__a, __b); } -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) __crc32cd(uint32_t __a, uint64_t __b) { return __builtin_arm_crc32cd(__a, __b); } #endif /* Armv8.3-A Javascript conversion intrinsic */ -#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT) -static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("v8.3a"))) __jcvt(double __a) { return __builtin_arm_jcvt(__a); } #endif /* Armv8.5-A FP rounding intrinsics */ -#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT) -static __inline__ float __attribute__((__always_inline__, __nodebug__)) -__frint32zf(float __a) { - return __builtin_arm_frint32zf(__a); +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint32zf(float __a) { + return __builtin_arm_rint32zf(__a); } -static __inline__ double __attribute__((__always_inline__, __nodebug__)) -__frint32z(double __a) { - return __builtin_arm_frint32z(__a); +static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint32z(double __a) { + return __builtin_arm_rint32z(__a); } -static __inline__ float __attribute__((__always_inline__, __nodebug__)) -__frint64zf(float __a) { - return __builtin_arm_frint64zf(__a); +static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint64zf(float __a) { + return __builtin_arm_rint64zf(__a); } -static __inline__ double __attribute__((__always_inline__, __nodebug__)) -__frint64z(double __a) { - return __builtin_arm_frint64z(__a); +static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint64z(double __a) { + return __builtin_arm_rint64z(__a); } -static 
__inline__ float __attribute__((__always_inline__, __nodebug__)) -__frint32xf(float __a) { - return __builtin_arm_frint32xf(__a); +static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint32xf(float __a) { + return __builtin_arm_rint32xf(__a); } -static __inline__ double __attribute__((__always_inline__, __nodebug__)) -__frint32x(double __a) { - return __builtin_arm_frint32x(__a); +static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint32x(double __a) { + return __builtin_arm_rint32x(__a); } -static __inline__ float __attribute__((__always_inline__, __nodebug__)) -__frint64xf(float __a) { - return __builtin_arm_frint64xf(__a); +static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint64xf(float __a) { + return __builtin_arm_rint64xf(__a); } -static __inline__ double __attribute__((__always_inline__, __nodebug__)) -__frint64x(double __a) { - return __builtin_arm_frint64x(__a); +static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint64x(double __a) { + return __builtin_arm_rint64x(__a); } #endif /* Armv8.7-A load/store 64-byte intrinsics */ -#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64) +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE typedef struct { uint64_t val[8]; } data512_t; -static __inline__ data512_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ data512_t __attribute__((__always_inline__, __nodebug__, target("ls64"))) __arm_ld64b(const void *__addr) { - data512_t __value; - __builtin_arm_ld64b(__addr, __value.val); - return __value; + data512_t __value; + __builtin_arm_ld64b(__addr, __value.val); + return __value; } -static __inline__ void __attribute__((__always_inline__, __nodebug__)) +static __inline__ void __attribute__((__always_inline__, __nodebug__, target("ls64"))) __arm_st64b(void *__addr, data512_t __value) { - __builtin_arm_st64b(__addr, __value.val); + __builtin_arm_st64b(__addr, __value.val); } -static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64"))) __arm_st64bv(void *__addr, data512_t __value) { - return __builtin_arm_st64bv(__addr, __value.val); + return __builtin_arm_st64bv(__addr, __value.val); } -static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64"))) __arm_st64bv0(void *__addr, data512_t __value) { - return __builtin_arm_st64bv0(__addr, __value.val); + return __builtin_arm_st64bv0(__addr, __value.val); } #endif /* 10.1 Special register intrinsics */ #define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg) #define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg) +#define __arm_rsr128(sysreg) __builtin_arm_rsr128(sysreg) #define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg) #define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg)) #define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg)) #define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v) #define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v) +#define __arm_wsr128(sysreg, v) __builtin_arm_wsr128(sysreg, v) #define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v) #define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v)) #define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v)) /* Memory Tagging Extensions (MTE) 
Intrinsics */ -#if __ARM_FEATURE_MEMORY_TAGGING +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE #define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask) #define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset) #define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded) #define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr) #define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr) #define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb) -#endif /* Memory Operations Intrinsics */ -#if __ARM_FEATURE_MOPS && __ARM_FEATURE_MEMORY_TAGGING #define __arm_mops_memset_tag(__tagged_address, __value, __size) \ __builtin_arm_mops_memset_tag(__tagged_address, __value, __size) #endif /* Transactional Memory Extension (TME) Intrinsics */ -#if __ARM_FEATURE_TME +#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME #define _TMFAILURE_REASON 0x00007fffu #define _TMFAILURE_RTRY 0x00008000u @@ -759,12 +760,12 @@ __arm_st64bv0(void *__addr, data512_t __value) { #endif /* __ARM_FEATURE_TME */ /* Armv8.5-A Random number generation intrinsics */ -#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG) -static __inline__ int __attribute__((__always_inline__, __nodebug__)) +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand"))) __rndr(uint64_t *__p) { return __builtin_arm_rndr(__p); } -static __inline__ int __attribute__((__always_inline__, __nodebug__)) +static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand"))) __rndrrs(uint64_t *__p) { return __builtin_arm_rndrrs(__p); } diff --git a/lib/include/arm_fp16.h b/lib/include/arm_fp16.h index 6e8470f54c..f114c6997b 100644 --- a/lib/include/arm_fp16.h +++ b/lib/include/arm_fp16.h @@ -29,7 +29,7 @@ typedef __fp16 float16_t; #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) -#if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && defined(__aarch64__) +#if defined(__aarch64__) #define vabdh_f16(__p0, __p1) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ diff --git a/lib/include/arm_neon.h b/lib/include/arm_neon.h index d5c8830f15..23d26a059d 100644 --- a/lib/include/arm_neon.h +++ b/lib/include/arm_neon.h @@ -34,11 +34,8 @@ #include -#ifdef __ARM_FEATURE_BF16 #include typedef __bf16 bfloat16_t; -#endif - typedef float float32_t; typedef __fp16 float16_t; #ifdef __aarch64__ @@ -428,7 +425,6 @@ typedef struct poly64x2x4_t { poly64x2_t val[4]; } poly64x2x4_t; -#ifdef __ARM_FEATURE_BF16 typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t; typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t; @@ -456,8 +452,6 @@ typedef struct bfloat16x8x4_t { bfloat16x8_t val[4]; } bfloat16x8x4_t; -#endif - #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) #ifdef __LITTLE_ENDIAN__ @@ -6124,194 +6118,230 @@ __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \ - int32x4_t __ret_20; \ - int32x2_t __s0_20 = __p0_20; \ - __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \ +#define vdupq_lane_f16(__p0_20, __p1_20) __extension__ ({ \ + float16x8_t __ret_20; \ + float16x4_t __s0_20 = __p0_20; \ + __ret_20 = splatq_lane_f16(__s0_20, __p1_20); \ __ret_20; \ }) #else -#define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \ - int32x4_t __ret_21; \ - int32x2_t __s0_21 = __p0_21; \ - int32x2_t 
__rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \ - __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \ - __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \ +#define vdupq_lane_f16(__p0_21, __p1_21) __extension__ ({ \ + float16x8_t __ret_21; \ + float16x4_t __s0_21 = __p0_21; \ + float16x4_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 3, 2, 1, 0); \ + __ret_21 = __noswap_splatq_lane_f16(__rev0_21, __p1_21); \ + __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_21; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \ - int64x2_t __ret_22; \ - int64x1_t __s0_22 = __p0_22; \ - __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \ +#define vdupq_lane_s32(__p0_22, __p1_22) __extension__ ({ \ + int32x4_t __ret_22; \ + int32x2_t __s0_22 = __p0_22; \ + __ret_22 = splatq_lane_s32(__s0_22, __p1_22); \ __ret_22; \ }) #else -#define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \ - int64x2_t __ret_23; \ - int64x1_t __s0_23 = __p0_23; \ - __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \ - __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \ +#define vdupq_lane_s32(__p0_23, __p1_23) __extension__ ({ \ + int32x4_t __ret_23; \ + int32x2_t __s0_23 = __p0_23; \ + int32x2_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 1, 0); \ + __ret_23 = __noswap_splatq_lane_s32(__rev0_23, __p1_23); \ + __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 3, 2, 1, 0); \ __ret_23; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \ - int16x8_t __ret_24; \ - int16x4_t __s0_24 = __p0_24; \ - __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \ +#define vdupq_lane_s64(__p0_24, __p1_24) __extension__ ({ \ + int64x2_t __ret_24; \ + int64x1_t __s0_24 = __p0_24; \ + __ret_24 = splatq_lane_s64(__s0_24, __p1_24); \ __ret_24; \ }) #else -#define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \ - int16x8_t __ret_25; \ - int16x4_t __s0_25 = __p0_25; \ - int16x4_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \ - __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \ - __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vdupq_lane_s64(__p0_25, __p1_25) __extension__ ({ \ + int64x2_t __ret_25; \ + int64x1_t __s0_25 = __p0_25; \ + __ret_25 = __noswap_splatq_lane_s64(__s0_25, __p1_25); \ + __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 1, 0); \ __ret_25; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \ - uint8x8_t __ret_26; \ - uint8x8_t __s0_26 = __p0_26; \ - __ret_26 = splat_lane_u8(__s0_26, __p1_26); \ +#define vdupq_lane_s16(__p0_26, __p1_26) __extension__ ({ \ + int16x8_t __ret_26; \ + int16x4_t __s0_26 = __p0_26; \ + __ret_26 = splatq_lane_s16(__s0_26, __p1_26); \ __ret_26; \ }) #else -#define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \ - uint8x8_t __ret_27; \ - uint8x8_t __s0_27 = __p0_27; \ - uint8x8_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \ +#define vdupq_lane_s16(__p0_27, __p1_27) __extension__ ({ \ + int16x8_t __ret_27; \ + int16x4_t __s0_27 = __p0_27; \ + int16x4_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \ + __ret_27 = __noswap_splatq_lane_s16(__rev0_27, __p1_27); \ __ret_27 = __builtin_shufflevector(__ret_27, 
__ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_27; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \ - uint32x2_t __ret_28; \ - uint32x2_t __s0_28 = __p0_28; \ - __ret_28 = splat_lane_u32(__s0_28, __p1_28); \ +#define vdup_lane_u8(__p0_28, __p1_28) __extension__ ({ \ + uint8x8_t __ret_28; \ + uint8x8_t __s0_28 = __p0_28; \ + __ret_28 = splat_lane_u8(__s0_28, __p1_28); \ __ret_28; \ }) #else -#define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \ - uint32x2_t __ret_29; \ - uint32x2_t __s0_29 = __p0_29; \ - uint32x2_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \ - __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \ - __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \ +#define vdup_lane_u8(__p0_29, __p1_29) __extension__ ({ \ + uint8x8_t __ret_29; \ + uint8x8_t __s0_29 = __p0_29; \ + uint8x8_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_29 = __noswap_splat_lane_u8(__rev0_29, __p1_29); \ + __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_29; \ }) #endif -#define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \ - uint64x1_t __ret_30; \ - uint64x1_t __s0_30 = __p0_30; \ - __ret_30 = splat_lane_u64(__s0_30, __p1_30); \ +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u32(__p0_30, __p1_30) __extension__ ({ \ + uint32x2_t __ret_30; \ + uint32x2_t __s0_30 = __p0_30; \ + __ret_30 = splat_lane_u32(__s0_30, __p1_30); \ __ret_30; \ }) -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \ - uint16x4_t __ret_31; \ - uint16x4_t __s0_31 = __p0_31; \ - __ret_31 = splat_lane_u16(__s0_31, __p1_31); \ - __ret_31; \ -}) #else -#define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \ - uint16x4_t __ret_32; \ - uint16x4_t __s0_32 = __p0_32; \ - uint16x4_t __rev0_32; __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \ - __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \ - __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \ - __ret_32; \ +#define vdup_lane_u32(__p0_31, __p1_31) __extension__ ({ \ + uint32x2_t __ret_31; \ + uint32x2_t __s0_31 = __p0_31; \ + uint32x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \ + __ret_31 = __noswap_splat_lane_u32(__rev0_31, __p1_31); \ + __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \ + __ret_31; \ }) #endif +#define vdup_lane_u64(__p0_32, __p1_32) __extension__ ({ \ + uint64x1_t __ret_32; \ + uint64x1_t __s0_32 = __p0_32; \ + __ret_32 = splat_lane_u64(__s0_32, __p1_32); \ + __ret_32; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \ - int8x8_t __ret_33; \ - int8x8_t __s0_33 = __p0_33; \ - __ret_33 = splat_lane_s8(__s0_33, __p1_33); \ +#define vdup_lane_u16(__p0_33, __p1_33) __extension__ ({ \ + uint16x4_t __ret_33; \ + uint16x4_t __s0_33 = __p0_33; \ + __ret_33 = splat_lane_u16(__s0_33, __p1_33); \ __ret_33; \ }) #else -#define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \ - int8x8_t __ret_34; \ - int8x8_t __s0_34 = __p0_34; \ - int8x8_t __rev0_34; __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \ - __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vdup_lane_u16(__p0_34, __p1_34) __extension__ ({ \ + uint16x4_t __ret_34; \ + uint16x4_t __s0_34 = __p0_34; \ + uint16x4_t __rev0_34; __rev0_34 = 
__builtin_shufflevector(__s0_34, __s0_34, 3, 2, 1, 0); \ + __ret_34 = __noswap_splat_lane_u16(__rev0_34, __p1_34); \ + __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 3, 2, 1, 0); \ __ret_34; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \ - float32x2_t __ret_35; \ - float32x2_t __s0_35 = __p0_35; \ - __ret_35 = splat_lane_f32(__s0_35, __p1_35); \ +#define vdup_lane_s8(__p0_35, __p1_35) __extension__ ({ \ + int8x8_t __ret_35; \ + int8x8_t __s0_35 = __p0_35; \ + __ret_35 = splat_lane_s8(__s0_35, __p1_35); \ __ret_35; \ }) #else -#define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \ - float32x2_t __ret_36; \ - float32x2_t __s0_36 = __p0_36; \ - float32x2_t __rev0_36; __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \ - __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \ - __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \ +#define vdup_lane_s8(__p0_36, __p1_36) __extension__ ({ \ + int8x8_t __ret_36; \ + int8x8_t __s0_36 = __p0_36; \ + int8x8_t __rev0_36; __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_36 = __noswap_splat_lane_s8(__rev0_36, __p1_36); \ + __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_36; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \ - int32x2_t __ret_37; \ - int32x2_t __s0_37 = __p0_37; \ - __ret_37 = splat_lane_s32(__s0_37, __p1_37); \ +#define vdup_lane_f32(__p0_37, __p1_37) __extension__ ({ \ + float32x2_t __ret_37; \ + float32x2_t __s0_37 = __p0_37; \ + __ret_37 = splat_lane_f32(__s0_37, __p1_37); \ __ret_37; \ }) #else -#define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \ - int32x2_t __ret_38; \ - int32x2_t __s0_38 = __p0_38; \ - int32x2_t __rev0_38; __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \ - __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \ +#define vdup_lane_f32(__p0_38, __p1_38) __extension__ ({ \ + float32x2_t __ret_38; \ + float32x2_t __s0_38 = __p0_38; \ + float32x2_t __rev0_38; __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \ + __ret_38 = __noswap_splat_lane_f32(__rev0_38, __p1_38); \ __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \ __ret_38; \ }) #endif -#define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \ - int64x1_t __ret_39; \ - int64x1_t __s0_39 = __p0_39; \ - __ret_39 = splat_lane_s64(__s0_39, __p1_39); \ +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f16(__p0_39, __p1_39) __extension__ ({ \ + float16x4_t __ret_39; \ + float16x4_t __s0_39 = __p0_39; \ + __ret_39 = splat_lane_f16(__s0_39, __p1_39); \ __ret_39; \ }) -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \ - int16x4_t __ret_40; \ - int16x4_t __s0_40 = __p0_40; \ - __ret_40 = splat_lane_s16(__s0_40, __p1_40); \ +#else +#define vdup_lane_f16(__p0_40, __p1_40) __extension__ ({ \ + float16x4_t __ret_40; \ + float16x4_t __s0_40 = __p0_40; \ + float16x4_t __rev0_40; __rev0_40 = __builtin_shufflevector(__s0_40, __s0_40, 3, 2, 1, 0); \ + __ret_40 = __noswap_splat_lane_f16(__rev0_40, __p1_40); \ + __ret_40 = __builtin_shufflevector(__ret_40, __ret_40, 3, 2, 1, 0); \ __ret_40; \ }) -#else -#define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \ - int16x4_t __ret_41; \ - int16x4_t __s0_41 = __p0_41; \ - int16x4_t __rev0_41; __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \ - __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \ - __ret_41 = 
__builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s32(__p0_41, __p1_41) __extension__ ({ \ + int32x2_t __ret_41; \ + int32x2_t __s0_41 = __p0_41; \ + __ret_41 = splat_lane_s32(__s0_41, __p1_41); \ __ret_41; \ }) +#else +#define vdup_lane_s32(__p0_42, __p1_42) __extension__ ({ \ + int32x2_t __ret_42; \ + int32x2_t __s0_42 = __p0_42; \ + int32x2_t __rev0_42; __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, 1, 0); \ + __ret_42 = __noswap_splat_lane_s32(__rev0_42, __p1_42); \ + __ret_42 = __builtin_shufflevector(__ret_42, __ret_42, 1, 0); \ + __ret_42; \ +}) +#endif + +#define vdup_lane_s64(__p0_43, __p1_43) __extension__ ({ \ + int64x1_t __ret_43; \ + int64x1_t __s0_43 = __p0_43; \ + __ret_43 = splat_lane_s64(__s0_43, __p1_43); \ + __ret_43; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s16(__p0_44, __p1_44) __extension__ ({ \ + int16x4_t __ret_44; \ + int16x4_t __s0_44 = __p0_44; \ + __ret_44 = splat_lane_s16(__s0_44, __p1_44); \ + __ret_44; \ +}) +#else +#define vdup_lane_s16(__p0_45, __p1_45) __extension__ ({ \ + int16x4_t __ret_45; \ + int16x4_t __s0_45 = __p0_45; \ + int16x4_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 3, 2, 1, 0); \ + __ret_45 = __noswap_splat_lane_s16(__rev0_45, __p1_45); \ + __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 3, 2, 1, 0); \ + __ret_45; \ +}) #endif #ifdef __LITTLE_ENDIAN__ @@ -14668,245 +14698,245 @@ __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \ - uint32x4_t __ret_42; \ - uint32x4_t __s0_42 = __p0_42; \ - uint32x4_t __s1_42 = __p1_42; \ - uint32x2_t __s2_42 = __p2_42; \ - __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \ - __ret_42; \ -}) -#else -#define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \ - uint32x4_t __ret_43; \ - uint32x4_t __s0_43 = __p0_43; \ - uint32x4_t __s1_43 = __p1_43; \ - uint32x2_t __s2_43 = __p2_43; \ - uint32x4_t __rev0_43; __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \ - uint32x4_t __rev1_43; __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \ - uint32x2_t __rev2_43; __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \ - __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \ - __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \ - __ret_43; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \ - uint16x8_t __ret_44; \ - uint16x8_t __s0_44 = __p0_44; \ - uint16x8_t __s1_44 = __p1_44; \ - uint16x4_t __s2_44 = __p2_44; \ - __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \ - __ret_44; \ -}) -#else -#define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \ - uint16x8_t __ret_45; \ - uint16x8_t __s0_45 = __p0_45; \ - uint16x8_t __s1_45 = __p1_45; \ - uint16x4_t __s2_45 = __p2_45; \ - uint16x8_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_45; __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_45; __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \ - __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \ - __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_45; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \ - float32x4_t __ret_46; \ - float32x4_t __s0_46 = __p0_46; \ - float32x4_t __s1_46 = __p1_46; \ - float32x2_t __s2_46 = __p2_46; \ - __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \ +#define vmlaq_lane_u32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \ + uint32x4_t __ret_46; \ + uint32x4_t __s0_46 = __p0_46; \ + uint32x4_t __s1_46 = __p1_46; \ + uint32x2_t __s2_46 = __p2_46; \ + __ret_46 = __s0_46 + __s1_46 * splatq_lane_u32(__s2_46, __p3_46); \ __ret_46; \ }) #else -#define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \ - float32x4_t __ret_47; \ - float32x4_t __s0_47 = __p0_47; \ - float32x4_t __s1_47 = __p1_47; \ - float32x2_t __s2_47 = __p2_47; \ - float32x4_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \ - float32x4_t __rev1_47; __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \ - float32x2_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \ - __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_f32(__rev2_47, __p3_47); \ +#define vmlaq_lane_u32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \ + uint32x4_t __ret_47; \ + uint32x4_t __s0_47 = __p0_47; \ + uint32x4_t __s1_47 = __p1_47; \ + uint32x2_t __s2_47 = __p2_47; \ + uint32x4_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \ + uint32x4_t __rev1_47; __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \ + uint32x2_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \ + __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_u32(__rev2_47, __p3_47); \ __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \ __ret_47; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \ - int32x4_t __ret_48; \ - int32x4_t __s0_48 = __p0_48; \ - int32x4_t __s1_48 = __p1_48; \ - int32x2_t __s2_48 = __p2_48; \ - __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \ +#define vmlaq_lane_u16(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \ + uint16x8_t __ret_48; \ + uint16x8_t __s0_48 = __p0_48; \ + uint16x8_t __s1_48 = __p1_48; \ + uint16x4_t __s2_48 = __p2_48; \ + __ret_48 = __s0_48 + __s1_48 * splatq_lane_u16(__s2_48, __p3_48); \ __ret_48; \ }) #else -#define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \ - int32x4_t __ret_49; \ - int32x4_t __s0_49 = __p0_49; \ - int32x4_t __s1_49 = __p1_49; \ - int32x2_t __s2_49 = __p2_49; \ - int32x4_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \ - int32x4_t __rev1_49; __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \ - int32x2_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \ - __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \ - __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \ +#define vmlaq_lane_u16(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \ + uint16x8_t __ret_49; \ + uint16x8_t __s0_49 = __p0_49; \ + uint16x8_t __s1_49 = __p1_49; \ + uint16x4_t __s2_49 = __p2_49; \ + uint16x8_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_49; __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, 
__s2_49, 3, 2, 1, 0); \ + __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_u16(__rev2_49, __p3_49); \ + __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_49; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \ - int16x8_t __ret_50; \ - int16x8_t __s0_50 = __p0_50; \ - int16x8_t __s1_50 = __p1_50; \ - int16x4_t __s2_50 = __p2_50; \ - __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \ +#define vmlaq_lane_f32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \ + float32x4_t __ret_50; \ + float32x4_t __s0_50 = __p0_50; \ + float32x4_t __s1_50 = __p1_50; \ + float32x2_t __s2_50 = __p2_50; \ + __ret_50 = __s0_50 + __s1_50 * splatq_lane_f32(__s2_50, __p3_50); \ __ret_50; \ }) #else -#define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \ - int16x8_t __ret_51; \ - int16x8_t __s0_51 = __p0_51; \ - int16x8_t __s1_51 = __p1_51; \ - int16x4_t __s2_51 = __p2_51; \ - int16x8_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_51; __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \ - __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \ - __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vmlaq_lane_f32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \ + float32x4_t __ret_51; \ + float32x4_t __s0_51 = __p0_51; \ + float32x4_t __s1_51 = __p1_51; \ + float32x2_t __s2_51 = __p2_51; \ + float32x4_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 3, 2, 1, 0); \ + float32x4_t __rev1_51; __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 3, 2, 1, 0); \ + float32x2_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 1, 0); \ + __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_f32(__rev2_51, __p3_51); \ + __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 3, 2, 1, 0); \ __ret_51; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \ - uint32x2_t __ret_52; \ - uint32x2_t __s0_52 = __p0_52; \ - uint32x2_t __s1_52 = __p1_52; \ - uint32x2_t __s2_52 = __p2_52; \ - __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \ +#define vmlaq_lane_s32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \ + int32x4_t __ret_52; \ + int32x4_t __s0_52 = __p0_52; \ + int32x4_t __s1_52 = __p1_52; \ + int32x2_t __s2_52 = __p2_52; \ + __ret_52 = __s0_52 + __s1_52 * splatq_lane_s32(__s2_52, __p3_52); \ __ret_52; \ }) #else -#define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \ - uint32x2_t __ret_53; \ - uint32x2_t __s0_53 = __p0_53; \ - uint32x2_t __s1_53 = __p1_53; \ - uint32x2_t __s2_53 = __p2_53; \ - uint32x2_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \ - uint32x2_t __rev1_53; __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \ - uint32x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \ - __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \ - __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \ +#define vmlaq_lane_s32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \ + int32x4_t __ret_53; \ + int32x4_t __s0_53 = __p0_53; \ + int32x4_t __s1_53 = __p1_53; \ + int32x2_t __s2_53 = 
__p2_53; \ + int32x4_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \ + int32x4_t __rev1_53; __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 3, 2, 1, 0); \ + int32x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \ + __ret_53 = __rev0_53 + __rev1_53 * __noswap_splatq_lane_s32(__rev2_53, __p3_53); \ + __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \ __ret_53; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ - uint16x4_t __ret_54; \ - uint16x4_t __s0_54 = __p0_54; \ - uint16x4_t __s1_54 = __p1_54; \ - uint16x4_t __s2_54 = __p2_54; \ - __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \ +#define vmlaq_lane_s16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ + int16x8_t __ret_54; \ + int16x8_t __s0_54 = __p0_54; \ + int16x8_t __s1_54 = __p1_54; \ + int16x4_t __s2_54 = __p2_54; \ + __ret_54 = __s0_54 + __s1_54 * splatq_lane_s16(__s2_54, __p3_54); \ __ret_54; \ }) #else -#define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ - uint16x4_t __ret_55; \ - uint16x4_t __s0_55 = __p0_55; \ - uint16x4_t __s1_55 = __p1_55; \ - uint16x4_t __s2_55 = __p2_55; \ - uint16x4_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \ - uint16x4_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \ - uint16x4_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \ - __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \ - __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \ +#define vmlaq_lane_s16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ + int16x8_t __ret_55; \ + int16x8_t __s0_55 = __p0_55; \ + int16x8_t __s1_55 = __p1_55; \ + int16x4_t __s2_55 = __p2_55; \ + int16x8_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \ + __ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_s16(__rev2_55, __p3_55); \ + __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_55; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ - float32x2_t __ret_56; \ - float32x2_t __s0_56 = __p0_56; \ - float32x2_t __s1_56 = __p1_56; \ - float32x2_t __s2_56 = __p2_56; \ - __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \ +#define vmla_lane_u32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ + uint32x2_t __ret_56; \ + uint32x2_t __s0_56 = __p0_56; \ + uint32x2_t __s1_56 = __p1_56; \ + uint32x2_t __s2_56 = __p2_56; \ + __ret_56 = __s0_56 + __s1_56 * splat_lane_u32(__s2_56, __p3_56); \ __ret_56; \ }) #else -#define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ - float32x2_t __ret_57; \ - float32x2_t __s0_57 = __p0_57; \ - float32x2_t __s1_57 = __p1_57; \ - float32x2_t __s2_57 = __p2_57; \ - float32x2_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \ - float32x2_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \ - float32x2_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \ - __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \ +#define 
vmla_lane_u32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ + uint32x2_t __ret_57; \ + uint32x2_t __s0_57 = __p0_57; \ + uint32x2_t __s1_57 = __p1_57; \ + uint32x2_t __s2_57 = __p2_57; \ + uint32x2_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \ + uint32x2_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \ + uint32x2_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \ + __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_u32(__rev2_57, __p3_57); \ __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \ __ret_57; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ - int32x2_t __ret_58; \ - int32x2_t __s0_58 = __p0_58; \ - int32x2_t __s1_58 = __p1_58; \ - int32x2_t __s2_58 = __p2_58; \ - __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \ +#define vmla_lane_u16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ + uint16x4_t __ret_58; \ + uint16x4_t __s0_58 = __p0_58; \ + uint16x4_t __s1_58 = __p1_58; \ + uint16x4_t __s2_58 = __p2_58; \ + __ret_58 = __s0_58 + __s1_58 * splat_lane_u16(__s2_58, __p3_58); \ __ret_58; \ }) #else -#define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ - int32x2_t __ret_59; \ - int32x2_t __s0_59 = __p0_59; \ - int32x2_t __s1_59 = __p1_59; \ - int32x2_t __s2_59 = __p2_59; \ - int32x2_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \ - int32x2_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \ - int32x2_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \ - __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \ - __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \ +#define vmla_lane_u16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ + uint16x4_t __ret_59; \ + uint16x4_t __s0_59 = __p0_59; \ + uint16x4_t __s1_59 = __p1_59; \ + uint16x4_t __s2_59 = __p2_59; \ + uint16x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \ + uint16x4_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 3, 2, 1, 0); \ + uint16x4_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \ + __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_u16(__rev2_59, __p3_59); \ + __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \ __ret_59; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ - int16x4_t __ret_60; \ - int16x4_t __s0_60 = __p0_60; \ - int16x4_t __s1_60 = __p1_60; \ - int16x4_t __s2_60 = __p2_60; \ - __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \ +#define vmla_lane_f32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ + float32x2_t __ret_60; \ + float32x2_t __s0_60 = __p0_60; \ + float32x2_t __s1_60 = __p1_60; \ + float32x2_t __s2_60 = __p2_60; \ + __ret_60 = __s0_60 + __s1_60 * splat_lane_f32(__s2_60, __p3_60); \ __ret_60; \ }) #else -#define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ - int16x4_t __ret_61; \ - int16x4_t __s0_61 = __p0_61; \ - int16x4_t __s1_61 = __p1_61; \ - int16x4_t __s2_61 = __p2_61; \ - int16x4_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \ - int16x4_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \ - int16x4_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 
0); \ - __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \ - __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \ +#define vmla_lane_f32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ + float32x2_t __ret_61; \ + float32x2_t __s0_61 = __p0_61; \ + float32x2_t __s1_61 = __p1_61; \ + float32x2_t __s2_61 = __p2_61; \ + float32x2_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 1, 0); \ + float32x2_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 1, 0); \ + float32x2_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 1, 0); \ + __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_f32(__rev2_61, __p3_61); \ + __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 1, 0); \ __ret_61; \ }) #endif +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ + int32x2_t __ret_62; \ + int32x2_t __s0_62 = __p0_62; \ + int32x2_t __s1_62 = __p1_62; \ + int32x2_t __s2_62 = __p2_62; \ + __ret_62 = __s0_62 + __s1_62 * splat_lane_s32(__s2_62, __p3_62); \ + __ret_62; \ +}) +#else +#define vmla_lane_s32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ + int32x2_t __ret_63; \ + int32x2_t __s0_63 = __p0_63; \ + int32x2_t __s1_63 = __p1_63; \ + int32x2_t __s2_63 = __p2_63; \ + int32x2_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \ + int32x2_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 1, 0); \ + int32x2_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \ + __ret_63 = __rev0_63 + __rev1_63 * __noswap_splat_lane_s32(__rev2_63, __p3_63); \ + __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \ + __ret_63; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ + int16x4_t __ret_64; \ + int16x4_t __s0_64 = __p0_64; \ + int16x4_t __s1_64 = __p1_64; \ + int16x4_t __s2_64 = __p2_64; \ + __ret_64 = __s0_64 + __s1_64 * splat_lane_s16(__s2_64, __p3_64); \ + __ret_64; \ +}) +#else +#define vmla_lane_s16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ + int16x4_t __ret_65; \ + int16x4_t __s0_65 = __p0_65; \ + int16x4_t __s1_65 = __p1_65; \ + int16x4_t __s2_65 = __p2_65; \ + int16x4_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 3, 2, 1, 0); \ + int16x4_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 3, 2, 1, 0); \ + int16x4_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \ + __ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_s16(__rev2_65, __p3_65); \ + __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 3, 2, 1, 0); \ + __ret_65; \ +}) +#endif + #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; @@ -15330,245 +15360,245 @@ __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ - uint32x4_t __ret_62; \ - uint32x4_t __s0_62 = __p0_62; \ - uint32x4_t __s1_62 = __p1_62; \ - uint32x2_t __s2_62 = __p2_62; \ - __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \ - __ret_62; \ -}) -#else -#define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ - uint32x4_t __ret_63; \ - uint32x4_t __s0_63 = __p0_63; \ - uint32x4_t __s1_63 = __p1_63; \ - uint32x2_t __s2_63 = __p2_63; \ - uint32x4_t __rev0_63; __rev0_63 = 
__builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \ - uint32x4_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \ - uint32x2_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \ - __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \ - __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \ - __ret_63; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ - uint16x8_t __ret_64; \ - uint16x8_t __s0_64 = __p0_64; \ - uint16x8_t __s1_64 = __p1_64; \ - uint16x4_t __s2_64 = __p2_64; \ - __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \ - __ret_64; \ -}) -#else -#define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ - uint16x8_t __ret_65; \ - uint16x8_t __s0_65 = __p0_65; \ - uint16x8_t __s1_65 = __p1_65; \ - uint16x4_t __s2_65 = __p2_65; \ - uint16x8_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \ - __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \ - __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_65; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ - float32x4_t __ret_66; \ - float32x4_t __s0_66 = __p0_66; \ - float32x4_t __s1_66 = __p1_66; \ - float32x2_t __s2_66 = __p2_66; \ - __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \ +#define vmlsq_lane_u32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ + uint32x4_t __ret_66; \ + uint32x4_t __s0_66 = __p0_66; \ + uint32x4_t __s1_66 = __p1_66; \ + uint32x2_t __s2_66 = __p2_66; \ + __ret_66 = __s0_66 - __s1_66 * splatq_lane_u32(__s2_66, __p3_66); \ __ret_66; \ }) #else -#define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ - float32x4_t __ret_67; \ - float32x4_t __s0_67 = __p0_67; \ - float32x4_t __s1_67 = __p1_67; \ - float32x2_t __s2_67 = __p2_67; \ - float32x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ - float32x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ - float32x2_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \ - __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \ +#define vmlsq_lane_u32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ + uint32x4_t __ret_67; \ + uint32x4_t __s0_67 = __p0_67; \ + uint32x4_t __s1_67 = __p1_67; \ + uint32x2_t __s2_67 = __p2_67; \ + uint32x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ + uint32x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ + uint32x2_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \ + __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_u32(__rev2_67, __p3_67); \ __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ __ret_67; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ - int32x4_t __ret_68; \ - int32x4_t __s0_68 = __p0_68; \ - int32x4_t __s1_68 = __p1_68; \ - int32x2_t __s2_68 = __p2_68; \ - __ret_68 = __s0_68 - __s1_68 * 
splatq_lane_s32(__s2_68, __p3_68); \ +#define vmlsq_lane_u16(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ + uint16x8_t __ret_68; \ + uint16x8_t __s0_68 = __p0_68; \ + uint16x8_t __s1_68 = __p1_68; \ + uint16x4_t __s2_68 = __p2_68; \ + __ret_68 = __s0_68 - __s1_68 * splatq_lane_u16(__s2_68, __p3_68); \ __ret_68; \ }) #else -#define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ - int32x4_t __ret_69; \ - int32x4_t __s0_69 = __p0_69; \ - int32x4_t __s1_69 = __p1_69; \ - int32x2_t __s2_69 = __p2_69; \ - int32x4_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \ - int32x4_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \ - int32x2_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \ - __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \ - __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \ +#define vmlsq_lane_u16(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ + uint16x8_t __ret_69; \ + uint16x8_t __s0_69 = __p0_69; \ + uint16x8_t __s1_69 = __p1_69; \ + uint16x4_t __s2_69 = __p2_69; \ + uint16x8_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \ + __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_u16(__rev2_69, __p3_69); \ + __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_69; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ - int16x8_t __ret_70; \ - int16x8_t __s0_70 = __p0_70; \ - int16x8_t __s1_70 = __p1_70; \ - int16x4_t __s2_70 = __p2_70; \ - __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \ +#define vmlsq_lane_f32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ + float32x4_t __ret_70; \ + float32x4_t __s0_70 = __p0_70; \ + float32x4_t __s1_70 = __p1_70; \ + float32x2_t __s2_70 = __p2_70; \ + __ret_70 = __s0_70 - __s1_70 * splatq_lane_f32(__s2_70, __p3_70); \ __ret_70; \ }) #else -#define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ - int16x8_t __ret_71; \ - int16x8_t __s0_71 = __p0_71; \ - int16x8_t __s1_71 = __p1_71; \ - int16x4_t __s2_71 = __p2_71; \ - int16x8_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \ - __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \ - __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vmlsq_lane_f32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ + float32x4_t __ret_71; \ + float32x4_t __s0_71 = __p0_71; \ + float32x4_t __s1_71 = __p1_71; \ + float32x2_t __s2_71 = __p2_71; \ + float32x4_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \ + float32x4_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 3, 2, 1, 0); \ + float32x2_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 1, 0); \ + __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_f32(__rev2_71, __p3_71); \ + __ret_71 = __builtin_shufflevector(__ret_71, 
__ret_71, 3, 2, 1, 0); \ __ret_71; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ - uint32x2_t __ret_72; \ - uint32x2_t __s0_72 = __p0_72; \ - uint32x2_t __s1_72 = __p1_72; \ - uint32x2_t __s2_72 = __p2_72; \ - __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \ +#define vmlsq_lane_s32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ + int32x4_t __ret_72; \ + int32x4_t __s0_72 = __p0_72; \ + int32x4_t __s1_72 = __p1_72; \ + int32x2_t __s2_72 = __p2_72; \ + __ret_72 = __s0_72 - __s1_72 * splatq_lane_s32(__s2_72, __p3_72); \ __ret_72; \ }) #else -#define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ - uint32x2_t __ret_73; \ - uint32x2_t __s0_73 = __p0_73; \ - uint32x2_t __s1_73 = __p1_73; \ - uint32x2_t __s2_73 = __p2_73; \ - uint32x2_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \ - uint32x2_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 1, 0); \ - uint32x2_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \ - __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \ - __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \ +#define vmlsq_lane_s32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ + int32x4_t __ret_73; \ + int32x4_t __s0_73 = __p0_73; \ + int32x4_t __s1_73 = __p1_73; \ + int32x2_t __s2_73 = __p2_73; \ + int32x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \ + int32x4_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 3, 2, 1, 0); \ + int32x2_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \ + __ret_73 = __rev0_73 - __rev1_73 * __noswap_splatq_lane_s32(__rev2_73, __p3_73); \ + __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \ __ret_73; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ - uint16x4_t __ret_74; \ - uint16x4_t __s0_74 = __p0_74; \ - uint16x4_t __s1_74 = __p1_74; \ - uint16x4_t __s2_74 = __p2_74; \ - __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \ +#define vmlsq_lane_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ + int16x8_t __ret_74; \ + int16x8_t __s0_74 = __p0_74; \ + int16x8_t __s1_74 = __p1_74; \ + int16x4_t __s2_74 = __p2_74; \ + __ret_74 = __s0_74 - __s1_74 * splatq_lane_s16(__s2_74, __p3_74); \ __ret_74; \ }) #else -#define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ - uint16x4_t __ret_75; \ - uint16x4_t __s0_75 = __p0_75; \ - uint16x4_t __s1_75 = __p1_75; \ - uint16x4_t __s2_75 = __p2_75; \ - uint16x4_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \ - uint16x4_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \ - uint16x4_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \ - __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \ - __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \ +#define vmlsq_lane_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ + int16x8_t __ret_75; \ + int16x8_t __s0_75 = __p0_75; \ + int16x8_t __s1_75 = __p1_75; \ + int16x4_t __s2_75 = __p2_75; \ + int16x8_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
int16x4_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \ + __ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_s16(__rev2_75, __p3_75); \ + __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_75; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ - float32x2_t __ret_76; \ - float32x2_t __s0_76 = __p0_76; \ - float32x2_t __s1_76 = __p1_76; \ - float32x2_t __s2_76 = __p2_76; \ - __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \ +#define vmls_lane_u32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ + uint32x2_t __ret_76; \ + uint32x2_t __s0_76 = __p0_76; \ + uint32x2_t __s1_76 = __p1_76; \ + uint32x2_t __s2_76 = __p2_76; \ + __ret_76 = __s0_76 - __s1_76 * splat_lane_u32(__s2_76, __p3_76); \ __ret_76; \ }) #else -#define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ - float32x2_t __ret_77; \ - float32x2_t __s0_77 = __p0_77; \ - float32x2_t __s1_77 = __p1_77; \ - float32x2_t __s2_77 = __p2_77; \ - float32x2_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \ - float32x2_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \ - float32x2_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \ - __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \ +#define vmls_lane_u32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ + uint32x2_t __ret_77; \ + uint32x2_t __s0_77 = __p0_77; \ + uint32x2_t __s1_77 = __p1_77; \ + uint32x2_t __s2_77 = __p2_77; \ + uint32x2_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \ + uint32x2_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \ + uint32x2_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \ + __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_u32(__rev2_77, __p3_77); \ __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \ __ret_77; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ - int32x2_t __ret_78; \ - int32x2_t __s0_78 = __p0_78; \ - int32x2_t __s1_78 = __p1_78; \ - int32x2_t __s2_78 = __p2_78; \ - __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \ +#define vmls_lane_u16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ + uint16x4_t __ret_78; \ + uint16x4_t __s0_78 = __p0_78; \ + uint16x4_t __s1_78 = __p1_78; \ + uint16x4_t __s2_78 = __p2_78; \ + __ret_78 = __s0_78 - __s1_78 * splat_lane_u16(__s2_78, __p3_78); \ __ret_78; \ }) #else -#define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ - int32x2_t __ret_79; \ - int32x2_t __s0_79 = __p0_79; \ - int32x2_t __s1_79 = __p1_79; \ - int32x2_t __s2_79 = __p2_79; \ - int32x2_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \ - int32x2_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \ - int32x2_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \ - __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \ - __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \ +#define vmls_lane_u16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ + uint16x4_t __ret_79; \ + uint16x4_t __s0_79 = __p0_79; \ + uint16x4_t __s1_79 = __p1_79; \ + uint16x4_t __s2_79 = __p2_79; \ + uint16x4_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 
3, 2, 1, 0); \ + uint16x4_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 3, 2, 1, 0); \ + uint16x4_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 3, 2, 1, 0); \ + __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_u16(__rev2_79, __p3_79); \ + __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \ __ret_79; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ - int16x4_t __ret_80; \ - int16x4_t __s0_80 = __p0_80; \ - int16x4_t __s1_80 = __p1_80; \ - int16x4_t __s2_80 = __p2_80; \ - __ret_80 = __s0_80 - __s1_80 * splat_lane_s16(__s2_80, __p3_80); \ +#define vmls_lane_f32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ + float32x2_t __ret_80; \ + float32x2_t __s0_80 = __p0_80; \ + float32x2_t __s1_80 = __p1_80; \ + float32x2_t __s2_80 = __p2_80; \ + __ret_80 = __s0_80 - __s1_80 * splat_lane_f32(__s2_80, __p3_80); \ __ret_80; \ }) #else -#define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ - int16x4_t __ret_81; \ - int16x4_t __s0_81 = __p0_81; \ - int16x4_t __s1_81 = __p1_81; \ - int16x4_t __s2_81 = __p2_81; \ - int16x4_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \ - int16x4_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \ - int16x4_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \ - __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \ - __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \ +#define vmls_lane_f32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ + float32x2_t __ret_81; \ + float32x2_t __s0_81 = __p0_81; \ + float32x2_t __s1_81 = __p1_81; \ + float32x2_t __s2_81 = __p2_81; \ + float32x2_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 1, 0); \ + float32x2_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 1, 0); \ + float32x2_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 1, 0); \ + __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_f32(__rev2_81, __p3_81); \ + __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 1, 0); \ __ret_81; \ }) #endif +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \ + int32x2_t __ret_82; \ + int32x2_t __s0_82 = __p0_82; \ + int32x2_t __s1_82 = __p1_82; \ + int32x2_t __s2_82 = __p2_82; \ + __ret_82 = __s0_82 - __s1_82 * splat_lane_s32(__s2_82, __p3_82); \ + __ret_82; \ +}) +#else +#define vmls_lane_s32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \ + int32x2_t __ret_83; \ + int32x2_t __s0_83 = __p0_83; \ + int32x2_t __s1_83 = __p1_83; \ + int32x2_t __s2_83 = __p2_83; \ + int32x2_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \ + int32x2_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \ + int32x2_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 1, 0); \ + __ret_83 = __rev0_83 - __rev1_83 * __noswap_splat_lane_s32(__rev2_83, __p3_83); \ + __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \ + __ret_83; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s16(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \ + int16x4_t __ret_84; \ + int16x4_t __s0_84 = __p0_84; \ + int16x4_t __s1_84 = __p1_84; \ + int16x4_t __s2_84 = __p2_84; \ + __ret_84 = __s0_84 - __s1_84 * splat_lane_s16(__s2_84, __p3_84); \ + __ret_84; \ +}) +#else +#define 
vmls_lane_s16(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \ + int16x4_t __ret_85; \ + int16x4_t __s0_85 = __p0_85; \ + int16x4_t __s1_85 = __p1_85; \ + int16x4_t __s2_85 = __p2_85; \ + int16x4_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 3, 2, 1, 0); \ + int16x4_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \ + int16x4_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 3, 2, 1, 0); \ + __ret_85 = __rev0_85 - __rev1_85 * __noswap_splat_lane_s16(__rev2_85, __p3_85); \ + __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 3, 2, 1, 0); \ + __ret_85; \ +}) +#endif + #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; @@ -16608,215 +16638,215 @@ __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \ - uint32x4_t __ret_82; \ - uint32x4_t __s0_82 = __p0_82; \ - uint32x2_t __s1_82 = __p1_82; \ - __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \ - __ret_82; \ -}) -#else -#define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \ - uint32x4_t __ret_83; \ - uint32x4_t __s0_83 = __p0_83; \ - uint32x2_t __s1_83 = __p1_83; \ - uint32x4_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \ - uint32x2_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \ - __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \ - __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \ - __ret_83; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \ - uint16x8_t __ret_84; \ - uint16x8_t __s0_84 = __p0_84; \ - uint16x4_t __s1_84 = __p1_84; \ - __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \ - __ret_84; \ -}) -#else -#define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \ - uint16x8_t __ret_85; \ - uint16x8_t __s0_85 = __p0_85; \ - uint16x4_t __s1_85 = __p1_85; \ - uint16x8_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \ - __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \ - __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_85; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \ - float32x4_t __ret_86; \ - float32x4_t __s0_86 = __p0_86; \ - float32x2_t __s1_86 = __p1_86; \ - __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \ +#define vmulq_lane_u32(__p0_86, __p1_86, __p2_86) __extension__ ({ \ + uint32x4_t __ret_86; \ + uint32x4_t __s0_86 = __p0_86; \ + uint32x2_t __s1_86 = __p1_86; \ + __ret_86 = __s0_86 * splatq_lane_u32(__s1_86, __p2_86); \ __ret_86; \ }) #else -#define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \ - float32x4_t __ret_87; \ - float32x4_t __s0_87 = __p0_87; \ - float32x2_t __s1_87 = __p1_87; \ - float32x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ - float32x2_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \ - __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \ +#define vmulq_lane_u32(__p0_87, __p1_87, __p2_87) __extension__ ({ \ + uint32x4_t __ret_87; \ + uint32x4_t __s0_87 = __p0_87; \ + uint32x2_t __s1_87 = 
__p1_87; \ + uint32x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ + uint32x2_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \ + __ret_87 = __rev0_87 * __noswap_splatq_lane_u32(__rev1_87, __p2_87); \ __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ __ret_87; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \ - int32x4_t __ret_88; \ - int32x4_t __s0_88 = __p0_88; \ - int32x2_t __s1_88 = __p1_88; \ - __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \ +#define vmulq_lane_u16(__p0_88, __p1_88, __p2_88) __extension__ ({ \ + uint16x8_t __ret_88; \ + uint16x8_t __s0_88 = __p0_88; \ + uint16x4_t __s1_88 = __p1_88; \ + __ret_88 = __s0_88 * splatq_lane_u16(__s1_88, __p2_88); \ __ret_88; \ }) #else -#define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \ - int32x4_t __ret_89; \ - int32x4_t __s0_89 = __p0_89; \ - int32x2_t __s1_89 = __p1_89; \ - int32x4_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \ - int32x2_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \ - __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \ - __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \ +#define vmulq_lane_u16(__p0_89, __p1_89, __p2_89) __extension__ ({ \ + uint16x8_t __ret_89; \ + uint16x8_t __s0_89 = __p0_89; \ + uint16x4_t __s1_89 = __p1_89; \ + uint16x8_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 3, 2, 1, 0); \ + __ret_89 = __rev0_89 * __noswap_splatq_lane_u16(__rev1_89, __p2_89); \ + __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_89; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \ - int16x8_t __ret_90; \ - int16x8_t __s0_90 = __p0_90; \ - int16x4_t __s1_90 = __p1_90; \ - __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \ +#define vmulq_lane_f32(__p0_90, __p1_90, __p2_90) __extension__ ({ \ + float32x4_t __ret_90; \ + float32x4_t __s0_90 = __p0_90; \ + float32x2_t __s1_90 = __p1_90; \ + __ret_90 = __s0_90 * splatq_lane_f32(__s1_90, __p2_90); \ __ret_90; \ }) #else -#define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \ - int16x8_t __ret_91; \ - int16x8_t __s0_91 = __p0_91; \ - int16x4_t __s1_91 = __p1_91; \ - int16x8_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \ - __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \ - __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vmulq_lane_f32(__p0_91, __p1_91, __p2_91) __extension__ ({ \ + float32x4_t __ret_91; \ + float32x4_t __s0_91 = __p0_91; \ + float32x2_t __s1_91 = __p1_91; \ + float32x4_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 3, 2, 1, 0); \ + float32x2_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 1, 0); \ + __ret_91 = __rev0_91 * __noswap_splatq_lane_f32(__rev1_91, __p2_91); \ + __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 3, 2, 1, 0); \ __ret_91; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \ - uint32x2_t __ret_92; \ - uint32x2_t __s0_92 = __p0_92; \ - uint32x2_t 
__s1_92 = __p1_92; \ - __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \ +#define vmulq_lane_s32(__p0_92, __p1_92, __p2_92) __extension__ ({ \ + int32x4_t __ret_92; \ + int32x4_t __s0_92 = __p0_92; \ + int32x2_t __s1_92 = __p1_92; \ + __ret_92 = __s0_92 * splatq_lane_s32(__s1_92, __p2_92); \ __ret_92; \ }) #else -#define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \ - uint32x2_t __ret_93; \ - uint32x2_t __s0_93 = __p0_93; \ - uint32x2_t __s1_93 = __p1_93; \ - uint32x2_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \ - uint32x2_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \ - __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \ - __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \ +#define vmulq_lane_s32(__p0_93, __p1_93, __p2_93) __extension__ ({ \ + int32x4_t __ret_93; \ + int32x4_t __s0_93 = __p0_93; \ + int32x2_t __s1_93 = __p1_93; \ + int32x4_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \ + int32x2_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \ + __ret_93 = __rev0_93 * __noswap_splatq_lane_s32(__rev1_93, __p2_93); \ + __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \ __ret_93; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \ - uint16x4_t __ret_94; \ - uint16x4_t __s0_94 = __p0_94; \ - uint16x4_t __s1_94 = __p1_94; \ - __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \ +#define vmulq_lane_s16(__p0_94, __p1_94, __p2_94) __extension__ ({ \ + int16x8_t __ret_94; \ + int16x8_t __s0_94 = __p0_94; \ + int16x4_t __s1_94 = __p1_94; \ + __ret_94 = __s0_94 * splatq_lane_s16(__s1_94, __p2_94); \ __ret_94; \ }) #else -#define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \ - uint16x4_t __ret_95; \ - uint16x4_t __s0_95 = __p0_95; \ - uint16x4_t __s1_95 = __p1_95; \ - uint16x4_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \ - uint16x4_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \ - __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \ - __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \ +#define vmulq_lane_s16(__p0_95, __p1_95, __p2_95) __extension__ ({ \ + int16x8_t __ret_95; \ + int16x8_t __s0_95 = __p0_95; \ + int16x4_t __s1_95 = __p1_95; \ + int16x8_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \ + __ret_95 = __rev0_95 * __noswap_splatq_lane_s16(__rev1_95, __p2_95); \ + __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret_95; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \ - float32x2_t __ret_96; \ - float32x2_t __s0_96 = __p0_96; \ - float32x2_t __s1_96 = __p1_96; \ - __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \ +#define vmul_lane_u32(__p0_96, __p1_96, __p2_96) __extension__ ({ \ + uint32x2_t __ret_96; \ + uint32x2_t __s0_96 = __p0_96; \ + uint32x2_t __s1_96 = __p1_96; \ + __ret_96 = __s0_96 * splat_lane_u32(__s1_96, __p2_96); \ __ret_96; \ }) #else -#define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \ - float32x2_t __ret_97; \ - float32x2_t __s0_97 = __p0_97; \ - float32x2_t __s1_97 = __p1_97; \ - float32x2_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 
0); \ - float32x2_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \ - __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \ +#define vmul_lane_u32(__p0_97, __p1_97, __p2_97) __extension__ ({ \ + uint32x2_t __ret_97; \ + uint32x2_t __s0_97 = __p0_97; \ + uint32x2_t __s1_97 = __p1_97; \ + uint32x2_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \ + uint32x2_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \ + __ret_97 = __rev0_97 * __noswap_splat_lane_u32(__rev1_97, __p2_97); \ __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \ __ret_97; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \ - int32x2_t __ret_98; \ - int32x2_t __s0_98 = __p0_98; \ - int32x2_t __s1_98 = __p1_98; \ - __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \ +#define vmul_lane_u16(__p0_98, __p1_98, __p2_98) __extension__ ({ \ + uint16x4_t __ret_98; \ + uint16x4_t __s0_98 = __p0_98; \ + uint16x4_t __s1_98 = __p1_98; \ + __ret_98 = __s0_98 * splat_lane_u16(__s1_98, __p2_98); \ __ret_98; \ }) #else -#define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \ - int32x2_t __ret_99; \ - int32x2_t __s0_99 = __p0_99; \ - int32x2_t __s1_99 = __p1_99; \ - int32x2_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \ - int32x2_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \ - __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \ - __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \ +#define vmul_lane_u16(__p0_99, __p1_99, __p2_99) __extension__ ({ \ + uint16x4_t __ret_99; \ + uint16x4_t __s0_99 = __p0_99; \ + uint16x4_t __s1_99 = __p1_99; \ + uint16x4_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \ + uint16x4_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 3, 2, 1, 0); \ + __ret_99 = __rev0_99 * __noswap_splat_lane_u16(__rev1_99, __p2_99); \ + __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \ __ret_99; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \ - int16x4_t __ret_100; \ - int16x4_t __s0_100 = __p0_100; \ - int16x4_t __s1_100 = __p1_100; \ - __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \ +#define vmul_lane_f32(__p0_100, __p1_100, __p2_100) __extension__ ({ \ + float32x2_t __ret_100; \ + float32x2_t __s0_100 = __p0_100; \ + float32x2_t __s1_100 = __p1_100; \ + __ret_100 = __s0_100 * splat_lane_f32(__s1_100, __p2_100); \ __ret_100; \ }) #else -#define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \ - int16x4_t __ret_101; \ - int16x4_t __s0_101 = __p0_101; \ - int16x4_t __s1_101 = __p1_101; \ - int16x4_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \ - int16x4_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \ - __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \ - __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \ +#define vmul_lane_f32(__p0_101, __p1_101, __p2_101) __extension__ ({ \ + float32x2_t __ret_101; \ + float32x2_t __s0_101 = __p0_101; \ + float32x2_t __s1_101 = __p1_101; \ + float32x2_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 1, 0); \ + float32x2_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 1, 0); \ + __ret_101 = __rev0_101 * 
__noswap_splat_lane_f32(__rev1_101, __p2_101); \ + __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 1, 0); \ __ret_101; \ }) #endif +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s32(__p0_102, __p1_102, __p2_102) __extension__ ({ \ + int32x2_t __ret_102; \ + int32x2_t __s0_102 = __p0_102; \ + int32x2_t __s1_102 = __p1_102; \ + __ret_102 = __s0_102 * splat_lane_s32(__s1_102, __p2_102); \ + __ret_102; \ +}) +#else +#define vmul_lane_s32(__p0_103, __p1_103, __p2_103) __extension__ ({ \ + int32x2_t __ret_103; \ + int32x2_t __s0_103 = __p0_103; \ + int32x2_t __s1_103 = __p1_103; \ + int32x2_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \ + int32x2_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \ + __ret_103 = __rev0_103 * __noswap_splat_lane_s32(__rev1_103, __p2_103); \ + __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \ + __ret_103; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s16(__p0_104, __p1_104, __p2_104) __extension__ ({ \ + int16x4_t __ret_104; \ + int16x4_t __s0_104 = __p0_104; \ + int16x4_t __s1_104 = __p1_104; \ + __ret_104 = __s0_104 * splat_lane_s16(__s1_104, __p2_104); \ + __ret_104; \ +}) +#else +#define vmul_lane_s16(__p0_105, __p1_105, __p2_105) __extension__ ({ \ + int16x4_t __ret_105; \ + int16x4_t __s0_105 = __p0_105; \ + int16x4_t __s1_105 = __p1_105; \ + int16x4_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \ + int16x4_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \ + __ret_105 = __rev0_105 * __noswap_splat_lane_s16(__rev1_105, __p2_105); \ + __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \ + __ret_105; \ +}) +#endif + #ifdef __LITTLE_ENDIAN__ __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { uint32x4_t __ret; @@ -17132,89 +17162,89 @@ __ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \ - uint64x2_t __ret_102; \ - uint32x2_t __s0_102 = __p0_102; \ - uint32x2_t __s1_102 = __p1_102; \ - __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \ - __ret_102; \ -}) -#else -#define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \ - uint64x2_t __ret_103; \ - uint32x2_t __s0_103 = __p0_103; \ - uint32x2_t __s1_103 = __p1_103; \ - uint32x2_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \ - uint32x2_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \ - __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \ - __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \ - __ret_103; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \ - uint32x4_t __ret_104; \ - uint16x4_t __s0_104 = __p0_104; \ - uint16x4_t __s1_104 = __p1_104; \ - __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \ - __ret_104; \ -}) -#else -#define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \ - uint32x4_t __ret_105; \ - uint16x4_t __s0_105 = __p0_105; \ - uint16x4_t __s1_105 = __p1_105; \ - uint16x4_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \ - uint16x4_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \ - __ret_105 = __noswap_vmull_u16(__rev0_105, __noswap_splat_lane_u16(__rev1_105, 
__p2_105)); \ - __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \ - __ret_105; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \ - int64x2_t __ret_106; \ - int32x2_t __s0_106 = __p0_106; \ - int32x2_t __s1_106 = __p1_106; \ - __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \ +#define vmull_lane_u32(__p0_106, __p1_106, __p2_106) __extension__ ({ \ + uint64x2_t __ret_106; \ + uint32x2_t __s0_106 = __p0_106; \ + uint32x2_t __s1_106 = __p1_106; \ + __ret_106 = vmull_u32(__s0_106, splat_lane_u32(__s1_106, __p2_106)); \ __ret_106; \ }) #else -#define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \ - int64x2_t __ret_107; \ - int32x2_t __s0_107 = __p0_107; \ - int32x2_t __s1_107 = __p1_107; \ - int32x2_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \ - int32x2_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \ - __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \ +#define vmull_lane_u32(__p0_107, __p1_107, __p2_107) __extension__ ({ \ + uint64x2_t __ret_107; \ + uint32x2_t __s0_107 = __p0_107; \ + uint32x2_t __s1_107 = __p1_107; \ + uint32x2_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \ + uint32x2_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \ + __ret_107 = __noswap_vmull_u32(__rev0_107, __noswap_splat_lane_u32(__rev1_107, __p2_107)); \ __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \ __ret_107; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \ - int32x4_t __ret_108; \ - int16x4_t __s0_108 = __p0_108; \ - int16x4_t __s1_108 = __p1_108; \ - __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \ +#define vmull_lane_u16(__p0_108, __p1_108, __p2_108) __extension__ ({ \ + uint32x4_t __ret_108; \ + uint16x4_t __s0_108 = __p0_108; \ + uint16x4_t __s1_108 = __p1_108; \ + __ret_108 = vmull_u16(__s0_108, splat_lane_u16(__s1_108, __p2_108)); \ __ret_108; \ }) #else -#define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \ - int32x4_t __ret_109; \ - int16x4_t __s0_109 = __p0_109; \ - int16x4_t __s1_109 = __p1_109; \ - int16x4_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \ - int16x4_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \ - __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \ +#define vmull_lane_u16(__p0_109, __p1_109, __p2_109) __extension__ ({ \ + uint32x4_t __ret_109; \ + uint16x4_t __s0_109 = __p0_109; \ + uint16x4_t __s1_109 = __p1_109; \ + uint16x4_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \ + uint16x4_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \ + __ret_109 = __noswap_vmull_u16(__rev0_109, __noswap_splat_lane_u16(__rev1_109, __p2_109)); \ __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \ __ret_109; \ }) #endif +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \ + int64x2_t __ret_110; \ + int32x2_t __s0_110 = __p0_110; \ + int32x2_t __s1_110 = __p1_110; \ + __ret_110 = vmull_s32(__s0_110, splat_lane_s32(__s1_110, __p2_110)); \ + __ret_110; \ +}) +#else +#define vmull_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \ + int64x2_t 
__ret_111; \ + int32x2_t __s0_111 = __p0_111; \ + int32x2_t __s1_111 = __p1_111; \ + int32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ + int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ + __ret_111 = __noswap_vmull_s32(__rev0_111, __noswap_splat_lane_s32(__rev1_111, __p2_111)); \ + __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ + __ret_111; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \ + int32x4_t __ret_112; \ + int16x4_t __s0_112 = __p0_112; \ + int16x4_t __s1_112 = __p1_112; \ + __ret_112 = vmull_s16(__s0_112, splat_lane_s16(__s1_112, __p2_112)); \ + __ret_112; \ +}) +#else +#define vmull_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \ + int32x4_t __ret_113; \ + int16x4_t __s0_113 = __p0_113; \ + int16x4_t __s1_113 = __p1_113; \ + int16x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ + int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ + __ret_113 = __noswap_vmull_s16(__rev0_113, __noswap_splat_lane_s16(__rev1_113, __p2_113)); \ + __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \ + __ret_113; \ +}) +#endif + #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { uint64x2_t __ret; @@ -19285,50 +19315,50 @@ __ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __ #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \ - int64x2_t __ret_110; \ - int64x2_t __s0_110 = __p0_110; \ - int32x2_t __s1_110 = __p1_110; \ - int32x2_t __s2_110 = __p2_110; \ - __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \ - __ret_110; \ +#define vqdmlal_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \ + int64x2_t __ret_114; \ + int64x2_t __s0_114 = __p0_114; \ + int32x2_t __s1_114 = __p1_114; \ + int32x2_t __s2_114 = __p2_114; \ + __ret_114 = vqdmlal_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \ + __ret_114; \ }) #else -#define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \ - int64x2_t __ret_111; \ - int64x2_t __s0_111 = __p0_111; \ - int32x2_t __s1_111 = __p1_111; \ - int32x2_t __s2_111 = __p2_111; \ - int64x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ - int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ - int32x2_t __rev2_111; __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \ - __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \ - __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ - __ret_111; \ +#define vqdmlal_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \ + int64x2_t __ret_115; \ + int64x2_t __s0_115 = __p0_115; \ + int32x2_t __s1_115 = __p1_115; \ + int32x2_t __s2_115 = __p2_115; \ + int64x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ + int32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ + int32x2_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \ + __ret_115 = __noswap_vqdmlal_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \ + __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ + __ret_115; \ }) 
#endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \ - int32x4_t __ret_112; \ - int32x4_t __s0_112 = __p0_112; \ - int16x4_t __s1_112 = __p1_112; \ - int16x4_t __s2_112 = __p2_112; \ - __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \ - __ret_112; \ +#define vqdmlal_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \ + int32x4_t __ret_116; \ + int32x4_t __s0_116 = __p0_116; \ + int16x4_t __s1_116 = __p1_116; \ + int16x4_t __s2_116 = __p2_116; \ + __ret_116 = vqdmlal_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \ + __ret_116; \ }) #else -#define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \ - int32x4_t __ret_113; \ - int32x4_t __s0_113 = __p0_113; \ - int16x4_t __s1_113 = __p1_113; \ - int16x4_t __s2_113 = __p2_113; \ - int32x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ - int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ - int16x4_t __rev2_113; __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \ - __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \ - __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \ - __ret_113; \ +#define vqdmlal_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \ + int32x4_t __ret_117; \ + int32x4_t __s0_117 = __p0_117; \ + int16x4_t __s1_117 = __p1_117; \ + int16x4_t __s2_117 = __p2_117; \ + int32x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ + int16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ + int16x4_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \ + __ret_117 = __noswap_vqdmlal_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \ + __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ + __ret_117; \ }) #endif @@ -19423,50 +19453,50 @@ __ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __ #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \ - int64x2_t __ret_114; \ - int64x2_t __s0_114 = __p0_114; \ - int32x2_t __s1_114 = __p1_114; \ - int32x2_t __s2_114 = __p2_114; \ - __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \ - __ret_114; \ +#define vqdmlsl_lane_s32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \ + int64x2_t __ret_118; \ + int64x2_t __s0_118 = __p0_118; \ + int32x2_t __s1_118 = __p1_118; \ + int32x2_t __s2_118 = __p2_118; \ + __ret_118 = vqdmlsl_s32(__s0_118, __s1_118, splat_lane_s32(__s2_118, __p3_118)); \ + __ret_118; \ }) #else -#define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \ - int64x2_t __ret_115; \ - int64x2_t __s0_115 = __p0_115; \ - int32x2_t __s1_115 = __p1_115; \ - int32x2_t __s2_115 = __p2_115; \ - int64x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ - int32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ - int32x2_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \ - __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \ - __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ - __ret_115; \ +#define 
vqdmlsl_lane_s32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \ + int64x2_t __ret_119; \ + int64x2_t __s0_119 = __p0_119; \ + int32x2_t __s1_119 = __p1_119; \ + int32x2_t __s2_119 = __p2_119; \ + int64x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ + int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \ + int32x2_t __rev2_119; __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 1, 0); \ + __ret_119 = __noswap_vqdmlsl_s32(__rev0_119, __rev1_119, __noswap_splat_lane_s32(__rev2_119, __p3_119)); \ + __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \ + __ret_119; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \ - int32x4_t __ret_116; \ - int32x4_t __s0_116 = __p0_116; \ - int16x4_t __s1_116 = __p1_116; \ - int16x4_t __s2_116 = __p2_116; \ - __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \ - __ret_116; \ +#define vqdmlsl_lane_s16(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \ + int32x4_t __ret_120; \ + int32x4_t __s0_120 = __p0_120; \ + int16x4_t __s1_120 = __p1_120; \ + int16x4_t __s2_120 = __p2_120; \ + __ret_120 = vqdmlsl_s16(__s0_120, __s1_120, splat_lane_s16(__s2_120, __p3_120)); \ + __ret_120; \ }) #else -#define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \ - int32x4_t __ret_117; \ - int32x4_t __s0_117 = __p0_117; \ - int16x4_t __s1_117 = __p1_117; \ - int16x4_t __s2_117 = __p2_117; \ - int32x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ - int16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ - int16x4_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \ - __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \ - __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ - __ret_117; \ +#define vqdmlsl_lane_s16(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \ + int32x4_t __ret_121; \ + int32x4_t __s0_121 = __p0_121; \ + int16x4_t __s1_121 = __p1_121; \ + int16x4_t __s2_121 = __p2_121; \ + int32x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ + int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \ + int16x4_t __rev2_121; __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 3, 2, 1, 0); \ + __ret_121 = __noswap_vqdmlsl_s16(__rev0_121, __rev1_121, __noswap_splat_lane_s16(__rev2_121, __p3_121)); \ + __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \ + __ret_121; \ }) #endif @@ -19711,44 +19741,44 @@ __ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \ - int64x2_t __ret_118; \ - int32x2_t __s0_118 = __p0_118; \ - int32x2_t __s1_118 = __p1_118; \ - __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \ - __ret_118; \ +#define vqdmull_lane_s32(__p0_122, __p1_122, __p2_122) __extension__ ({ \ + int64x2_t __ret_122; \ + int32x2_t __s0_122 = __p0_122; \ + int32x2_t __s1_122 = __p1_122; \ + __ret_122 = vqdmull_s32(__s0_122, splat_lane_s32(__s1_122, __p2_122)); \ + __ret_122; \ }) #else -#define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \ - int64x2_t __ret_119; \ - int32x2_t __s0_119 = __p0_119; \ - 
int32x2_t __s1_119 = __p1_119; \ - int32x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ - int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \ - __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \ - __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \ - __ret_119; \ +#define vqdmull_lane_s32(__p0_123, __p1_123, __p2_123) __extension__ ({ \ + int64x2_t __ret_123; \ + int32x2_t __s0_123 = __p0_123; \ + int32x2_t __s1_123 = __p1_123; \ + int32x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \ + int32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \ + __ret_123 = __noswap_vqdmull_s32(__rev0_123, __noswap_splat_lane_s32(__rev1_123, __p2_123)); \ + __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \ + __ret_123; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \ - int32x4_t __ret_120; \ - int16x4_t __s0_120 = __p0_120; \ - int16x4_t __s1_120 = __p1_120; \ - __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \ - __ret_120; \ +#define vqdmull_lane_s16(__p0_124, __p1_124, __p2_124) __extension__ ({ \ + int32x4_t __ret_124; \ + int16x4_t __s0_124 = __p0_124; \ + int16x4_t __s1_124 = __p1_124; \ + __ret_124 = vqdmull_s16(__s0_124, splat_lane_s16(__s1_124, __p2_124)); \ + __ret_124; \ }) #else -#define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \ - int32x4_t __ret_121; \ - int16x4_t __s0_121 = __p0_121; \ - int16x4_t __s1_121 = __p1_121; \ - int16x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ - int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \ - __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \ - __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \ - __ret_121; \ +#define vqdmull_lane_s16(__p0_125, __p1_125, __p2_125) __extension__ ({ \ + int32x4_t __ret_125; \ + int16x4_t __s0_125 = __p0_125; \ + int16x4_t __s1_125 = __p1_125; \ + int16x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ + int16x4_t __rev1_125; __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, 3, 2, 1, 0); \ + __ret_125 = __noswap_vqdmull_s16(__rev0_125, __noswap_splat_lane_s16(__rev1_125, __p2_125)); \ + __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ + __ret_125; \ }) #endif @@ -32235,277 +32265,5705 @@ __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { } #endif -#if !defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \ - float16x8_t __ret_122; \ - float16x4_t __s0_122 = __p0_122; \ - __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \ - __ret_122; \ -}) -#else -#define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \ - float16x8_t __ret_123; \ - float16x4_t __s0_123 = __p0_123; \ - float16x4_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \ - __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \ - __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_123; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \ - float16x4_t __ret_124; \ - float16x4_t __s0_124 = __p0_124; \ - __ret_124 = splat_lane_f16(__s0_124, 
__p1_124); \ - __ret_124; \ -}) -#else -#define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \ - float16x4_t __ret_125; \ - float16x4_t __s0_125 = __p0_125; \ - float16x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ - __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \ - __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ - __ret_125; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_n_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ +#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #else -#define vdupq_n_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ +#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__rev0, __p1, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_n_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ +#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #else -#define vdup_n_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ +#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__rev0, __p1, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vmovq_n_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ +#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #else -#define vmovq_n_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ +#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) 
__builtin_neon_splatq_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vmov_n_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ +#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #else -#define vmov_n_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ +#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \ - int32x4_t __ret_126; \ - int32x4_t __s0_126 = __p0_126; \ - int32x2_t __s1_126 = __p1_126; \ - __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \ +__ai __attribute__((target("bf16"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + bfloat16x4_t 
__rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdotq_lane_f32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \ + float32x4_t __ret_126; \ + float32x4_t __s0_126 = __p0_126; \ + bfloat16x8_t __s1_126 = __p1_126; \ + bfloat16x4_t __s2_126 = __p2_126; \ +bfloat16x4_t __reint_126 = __s2_126; \ +float32x4_t __reint1_126 = splatq_lane_f32(*(float32x2_t *) &__reint_126, __p3_126); \ + __ret_126 = vbfdotq_f32(__s0_126, __s1_126, *(bfloat16x8_t *) &__reint1_126); \ __ret_126; \ }) #else -#define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \ - int32x4_t __ret_127; \ - int32x4_t __s0_127 = __p0_127; \ - int32x2_t __s1_127 = __p1_127; \ - int32x4_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \ - int32x2_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \ - __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \ +#define vbfdotq_lane_f32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \ + float32x4_t __ret_127; \ + float32x4_t __s0_127 = __p0_127; \ + bfloat16x8_t __s1_127 = __p1_127; \ + bfloat16x4_t __s2_127 = __p2_127; \ + float32x4_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, 3, 2, 1, 0); \ +bfloat16x4_t __reint_127 = __rev2_127; \ +float32x4_t __reint1_127 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_127, __p3_127); \ + __ret_127 = __noswap_vbfdotq_f32(__rev0_127, __rev1_127, *(bfloat16x8_t *) &__reint1_127); \ __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \ __ret_127; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \ - int16x8_t __ret_128; \ - int16x8_t __s0_128 = __p0_128; \ - int16x4_t __s1_128 = __p1_128; \ - __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \ +#define vbfdot_lane_f32(__p0_128, __p1_128, __p2_128, __p3_128) __extension__ ({ \ + float32x2_t __ret_128; \ + float32x2_t __s0_128 = __p0_128; \ + bfloat16x4_t __s1_128 = __p1_128; \ + bfloat16x4_t __s2_128 = __p2_128; \ +bfloat16x4_t __reint_128 = __s2_128; \ +float32x2_t __reint1_128 = splat_lane_f32(*(float32x2_t *) &__reint_128, __p3_128); \ + __ret_128 = vbfdot_f32(__s0_128, __s1_128, *(bfloat16x4_t *) &__reint1_128); \ __ret_128; \ }) #else -#define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \ - int16x8_t __ret_129; \ - int16x8_t __s0_129 = __p0_129; \ - int16x4_t __s1_129 = __p1_129; \ - int16x8_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \ - __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \ - __ret_129 = 
__builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vbfdot_lane_f32(__p0_129, __p1_129, __p2_129, __p3_129) __extension__ ({ \ + float32x2_t __ret_129; \ + float32x2_t __s0_129 = __p0_129; \ + bfloat16x4_t __s1_129 = __p1_129; \ + bfloat16x4_t __s2_129 = __p2_129; \ + float32x2_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 1, 0); \ + bfloat16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_129; __rev2_129 = __builtin_shufflevector(__s2_129, __s2_129, 3, 2, 1, 0); \ +bfloat16x4_t __reint_129 = __rev2_129; \ +float32x2_t __reint1_129 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_129, __p3_129); \ + __ret_129 = __noswap_vbfdot_f32(__rev0_129, __rev1_129, *(bfloat16x4_t *) &__reint1_129); \ + __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 1, 0); \ __ret_129; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \ - int32x2_t __ret_130; \ - int32x2_t __s0_130 = __p0_130; \ - int32x2_t __s1_130 = __p1_130; \ - __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \ +#define vbfdotq_laneq_f32(__p0_130, __p1_130, __p2_130, __p3_130) __extension__ ({ \ + float32x4_t __ret_130; \ + float32x4_t __s0_130 = __p0_130; \ + bfloat16x8_t __s1_130 = __p1_130; \ + bfloat16x8_t __s2_130 = __p2_130; \ +bfloat16x8_t __reint_130 = __s2_130; \ +float32x4_t __reint1_130 = splatq_laneq_f32(*(float32x4_t *) &__reint_130, __p3_130); \ + __ret_130 = vbfdotq_f32(__s0_130, __s1_130, *(bfloat16x8_t *) &__reint1_130); \ __ret_130; \ }) #else -#define vqdmulh_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \ - int32x2_t __ret_131; \ - int32x2_t __s0_131 = __p0_131; \ - int32x2_t __s1_131 = __p1_131; \ - int32x2_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \ - int32x2_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \ - __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \ - __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \ +#define vbfdotq_laneq_f32(__p0_131, __p1_131, __p2_131, __p3_131) __extension__ ({ \ + float32x4_t __ret_131; \ + float32x4_t __s0_131 = __p0_131; \ + bfloat16x8_t __s1_131 = __p1_131; \ + bfloat16x8_t __s2_131 = __p2_131; \ + float32x4_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_131; __rev2_131 = __builtin_shufflevector(__s2_131, __s2_131, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_131 = __rev2_131; \ +float32x4_t __reint1_131 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_131, __p3_131); \ + __ret_131 = __noswap_vbfdotq_f32(__rev0_131, __rev1_131, *(bfloat16x8_t *) &__reint1_131); \ + __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 3, 2, 1, 0); \ __ret_131; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \ - int16x4_t __ret_132; \ - int16x4_t __s0_132 = __p0_132; \ - int16x4_t __s1_132 = __p1_132; \ - __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \ +#define vbfdot_laneq_f32(__p0_132, __p1_132, __p2_132, __p3_132) __extension__ ({ \ + float32x2_t __ret_132; \ + float32x2_t __s0_132 = __p0_132; \ + bfloat16x4_t __s1_132 = __p1_132; \ + bfloat16x8_t __s2_132 = __p2_132; \ +bfloat16x8_t 
__reint_132 = __s2_132; \ +float32x2_t __reint1_132 = splat_laneq_f32(*(float32x4_t *) &__reint_132, __p3_132); \ + __ret_132 = vbfdot_f32(__s0_132, __s1_132, *(bfloat16x4_t *) &__reint1_132); \ __ret_132; \ }) #else -#define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \ - int16x4_t __ret_133; \ - int16x4_t __s0_133 = __p0_133; \ - int16x4_t __s1_133 = __p1_133; \ - int16x4_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \ - int16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ - __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \ - __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \ +#define vbfdot_laneq_f32(__p0_133, __p1_133, __p2_133, __p3_133) __extension__ ({ \ + float32x2_t __ret_133; \ + float32x2_t __s0_133 = __p0_133; \ + bfloat16x4_t __s1_133 = __p1_133; \ + bfloat16x8_t __s2_133 = __p2_133; \ + float32x2_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 1, 0); \ + bfloat16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_133; __rev2_133 = __builtin_shufflevector(__s2_133, __s2_133, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_133 = __rev2_133; \ +float32x2_t __reint1_133 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_133, __p3_133); \ + __ret_133 = __noswap_vbfdot_f32(__rev0_133, __rev1_133, *(bfloat16x4_t *) &__reint1_133); \ + __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 1, 0); \ __ret_133; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \ - int32x4_t __ret_134; \ - int32x4_t __s0_134 = __p0_134; \ - int32x2_t __s1_134 = __p1_134; \ - __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \ - __ret_134; \ +__ai __attribute__((target("bf16"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#define vcreate_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (bfloat16x4_t)(__promote); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_134) { + float32x4_t __ret_134; +bfloat16x4_t __reint_134 = __p0_134; +int32x4_t __reint1_134 = vshll_n_s16(*(int16x4_t *) &__reint_134, 16); + __ret_134 = *(float32x4_t *) &__reint1_134; + return __ret_134; +} +#else +__ai __attribute__((target("bf16"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_135) { + float32x4_t __ret_135; + bfloat16x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__p0_135, __p0_135, 3, 2, 1, 0); +bfloat16x4_t __reint_135 = __rev0_135; +int32x4_t __reint1_135 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_135, 16); + __ret_135 = *(float32x4_t *) &__reint1_135; + __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); + return __ret_135; +} +__ai 
__attribute__((target("bf16"))) float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_136) { + float32x4_t __ret_136; +bfloat16x4_t __reint_136 = __p0_136; +int32x4_t __reint1_136 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_136, 16); + __ret_136 = *(float32x4_t *) &__reint1_136; + return __ret_136; +} +#endif + +__ai __attribute__((target("bf16"))) float32_t vcvtah_f32_bf16(bfloat16_t __p0) { + float32_t __ret; +bfloat16_t __reint = __p0; +int32_t __reint1 = *(int32_t *) &__reint << 16; + __ret = *(float32_t *) &__reint1; + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16_t vcvth_bf16_f32(float32_t __p0) { + bfloat16_t __ret; + __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ }) #else -#define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \ - int32x4_t __ret_135; \ - int32x4_t __s0_135 = __p0_135; \ - int32x2_t __s1_135 = __p1_135; \ - int32x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \ - int32x2_t __rev1_135; __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \ - __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \ - __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \ - __ret_135; \ +#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \ - int16x8_t __ret_136; \ - int16x8_t __s0_136 = __p0_136; \ - int16x4_t __s1_136 = __p1_136; \ - __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \ - __ret_136; \ -}) -#else -#define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \ - int16x8_t __ret_137; \ - int16x8_t __s0_137 = __p0_137; \ - int16x4_t __s1_137 = __p1_137; \ - int16x8_t __rev0_137; __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_137; __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \ - __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \ - __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vdupq_lane_bf16(__p0_137, __p1_137) __extension__ ({ \ + bfloat16x8_t __ret_137; \ + bfloat16x4_t __s0_137 = __p0_137; \ + __ret_137 = splatq_lane_bf16(__s0_137, __p1_137); \ __ret_137; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \ - int32x2_t __ret_138; \ - int32x2_t __s0_138 = __p0_138; \ - int32x2_t __s1_138 = __p1_138; \ - __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \ +#else +#define vdupq_lane_bf16(__p0_138, __p1_138) __extension__ ({ \ + bfloat16x8_t __ret_138; \ + bfloat16x4_t __s0_138 = __p0_138; \ + bfloat16x4_t __rev0_138; __rev0_138 = __builtin_shufflevector(__s0_138, __s0_138, 3, 2, 1, 0); \ + __ret_138 = __noswap_splatq_lane_bf16(__rev0_138, __p1_138); \ + __ret_138 = __builtin_shufflevector(__ret_138, __ret_138, 7, 6, 5, 4, 3, 2, 1, 0); \ 
__ret_138; \ }) -#else -#define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \ - int32x2_t __ret_139; \ - int32x2_t __s0_139 = __p0_139; \ - int32x2_t __s1_139 = __p1_139; \ - int32x2_t __rev0_139; __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \ - int32x2_t __rev1_139; __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \ - __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \ - __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_bf16(__p0_139, __p1_139) __extension__ ({ \ + bfloat16x4_t __ret_139; \ + bfloat16x4_t __s0_139 = __p0_139; \ + __ret_139 = splat_lane_bf16(__s0_139, __p1_139); \ __ret_139; \ }) +#else +#define vdup_lane_bf16(__p0_140, __p1_140) __extension__ ({ \ + bfloat16x4_t __ret_140; \ + bfloat16x4_t __s0_140 = __p0_140; \ + bfloat16x4_t __rev0_140; __rev0_140 = __builtin_shufflevector(__s0_140, __s0_140, 3, 2, 1, 0); \ + __ret_140 = __noswap_splat_lane_bf16(__rev0_140, __p1_140); \ + __ret_140 = __builtin_shufflevector(__ret_140, __ret_140, 3, 2, 1, 0); \ + __ret_140; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \ - int16x4_t __ret_140; \ - int16x4_t __s0_140 = __p0_140; \ - int16x4_t __s1_140 = __p1_140; \ - __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \ - __ret_140; \ +#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ }) #else -#define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \ - int16x4_t __ret_141; \ - int16x4_t __s0_141 = __p0_141; \ - int16x4_t __s1_141 = __p1_141; \ - int16x4_t __rev0_141; __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \ - int16x4_t __rev1_141; __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \ - __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \ - __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \ +#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_bf16(__p0_141, __p1_141) __extension__ ({ \ + bfloat16x8_t __ret_141; \ + bfloat16x8_t __s0_141 = __p0_141; \ + __ret_141 = splatq_laneq_bf16(__s0_141, __p1_141); \ __ret_141; \ }) +#else +#define vdupq_laneq_bf16(__p0_142, __p1_142) __extension__ ({ \ + bfloat16x8_t __ret_142; \ + bfloat16x8_t __s0_142 = __p0_142; \ + bfloat16x8_t __rev0_142; __rev0_142 = __builtin_shufflevector(__s0_142, __s0_142, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_142 = __noswap_splatq_laneq_bf16(__rev0_142, __p1_142); \ + __ret_142 = __builtin_shufflevector(__ret_142, __ret_142, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_142; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_bf16(__p0_143, __p1_143) __extension__ ({ \ + bfloat16x4_t __ret_143; \ + bfloat16x8_t __s0_143 = __p0_143; \ + __ret_143 = splat_laneq_bf16(__s0_143, __p1_143); \ + __ret_143; \ +}) +#else +#define vdup_laneq_bf16(__p0_144, __p1_144) __extension__ ({ \ + bfloat16x4_t __ret_144; \ + bfloat16x8_t __s0_144 = __p0_144; \ + 
bfloat16x8_t __rev0_144; __rev0_144 = __builtin_shufflevector(__s0_144, __s0_144, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_144 = __noswap_splat_laneq_bf16(__rev0_144, __p1_144); \ + __ret_144 = __builtin_shufflevector(__ret_144, __ret_144, 3, 2, 1, 0); \ + __ret_144; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ + __ret; \ +}) +#else +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ + __ret; \ +}) +#else +#define vld1_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ + __ret; \ +}) +#else +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ + __ret; \ +}) +#else +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) 
__builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ + __ret; \ +}) +#else +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ + __ret; \ +}) +#else +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + 
bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ + __ret; \ +}) +#else +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ + __ret; \ +}) +#else +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], 
__ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ + __ret; \ +}) +#else +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ + __ret; \ +}) +#else +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + 
__ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__s1, 43); \ +}) +#else +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__rev1, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__s1, 11); \ +}) +#else +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__rev1, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ +}) +#else +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ +}) +#else +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ +}) +#else +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ +}) +#else +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__rev1.val[0], 
(int8x8_t)__rev1.val[1], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ +}) +#else +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ +}) +#else +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ 
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ +}) +#else +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ +}) +#else +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("dotprod"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t 
__ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod"))) uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("dotprod"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod"))) int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#else +__ai __attribute__((target("dotprod"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod"))) uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("dotprod"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod"))) int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_lane_u32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \ + uint32x4_t __ret_145; \ + uint32x4_t __s0_145 = __p0_145; \ + uint8x16_t __s1_145 = __p1_145; \ + uint8x8_t __s2_145 = __p2_145; \ +uint8x8_t __reint_145 = __s2_145; \ +uint32x4_t __reint1_145 = splatq_lane_u32(*(uint32x2_t *) &__reint_145, __p3_145); \ + __ret_145 = vdotq_u32(__s0_145, __s1_145, *(uint8x16_t *) &__reint1_145); \ + __ret_145; \ +}) +#else +#define vdotq_lane_u32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \ + uint32x4_t __ret_146; \ + uint32x4_t __s0_146 = __p0_146; \ + uint8x16_t __s1_146 = __p1_146; \ + uint8x8_t __s2_146 = __p2_146; \ + uint32x4_t __rev0_146; __rev0_146 = __builtin_shufflevector(__s0_146, __s0_146, 3, 2, 1, 0); \ + uint8x16_t __rev1_146; __rev1_146 = __builtin_shufflevector(__s1_146, __s1_146, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_146; __rev2_146 = __builtin_shufflevector(__s2_146, __s2_146, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_146 = __rev2_146; \ +uint32x4_t __reint1_146 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_146, __p3_146); \ + __ret_146 = __noswap_vdotq_u32(__rev0_146, __rev1_146, *(uint8x16_t *) &__reint1_146); \ + __ret_146 = __builtin_shufflevector(__ret_146, __ret_146, 3, 2, 1, 0); \ + __ret_146; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_lane_s32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \ + int32x4_t __ret_147; \ + int32x4_t __s0_147 = __p0_147; \ + int8x16_t __s1_147 = __p1_147; \ + int8x8_t __s2_147 = __p2_147; \ +int8x8_t __reint_147 = __s2_147; \ +int32x4_t __reint1_147 = splatq_lane_s32(*(int32x2_t *) &__reint_147, __p3_147); \ + __ret_147 = vdotq_s32(__s0_147, __s1_147, *(int8x16_t *) &__reint1_147); \ + __ret_147; \ +}) +#else +#define vdotq_lane_s32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \ + int32x4_t __ret_148; \ + int32x4_t __s0_148 = __p0_148; \ + int8x16_t __s1_148 = __p1_148; \ + int8x8_t __s2_148 = __p2_148; \ + int32x4_t __rev0_148; __rev0_148 = __builtin_shufflevector(__s0_148, __s0_148, 3, 2, 1, 0); \ + int8x16_t __rev1_148; __rev1_148 = __builtin_shufflevector(__s1_148, __s1_148, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_148; __rev2_148 = __builtin_shufflevector(__s2_148, __s2_148, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_148 = __rev2_148; \ +int32x4_t __reint1_148 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_148, __p3_148); \ + __ret_148 = __noswap_vdotq_s32(__rev0_148, __rev1_148, *(int8x16_t *) &__reint1_148); \ + __ret_148 = __builtin_shufflevector(__ret_148, __ret_148, 3, 2, 1, 0); \ + __ret_148; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_lane_u32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \ + uint32x2_t __ret_149; \ + uint32x2_t __s0_149 = __p0_149; \ + uint8x8_t __s1_149 = 
__p1_149; \ + uint8x8_t __s2_149 = __p2_149; \ +uint8x8_t __reint_149 = __s2_149; \ +uint32x2_t __reint1_149 = splat_lane_u32(*(uint32x2_t *) &__reint_149, __p3_149); \ + __ret_149 = vdot_u32(__s0_149, __s1_149, *(uint8x8_t *) &__reint1_149); \ + __ret_149; \ +}) +#else +#define vdot_lane_u32(__p0_150, __p1_150, __p2_150, __p3_150) __extension__ ({ \ + uint32x2_t __ret_150; \ + uint32x2_t __s0_150 = __p0_150; \ + uint8x8_t __s1_150 = __p1_150; \ + uint8x8_t __s2_150 = __p2_150; \ + uint32x2_t __rev0_150; __rev0_150 = __builtin_shufflevector(__s0_150, __s0_150, 1, 0); \ + uint8x8_t __rev1_150; __rev1_150 = __builtin_shufflevector(__s1_150, __s1_150, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_150; __rev2_150 = __builtin_shufflevector(__s2_150, __s2_150, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_150 = __rev2_150; \ +uint32x2_t __reint1_150 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150); \ + __ret_150 = __noswap_vdot_u32(__rev0_150, __rev1_150, *(uint8x8_t *) &__reint1_150); \ + __ret_150 = __builtin_shufflevector(__ret_150, __ret_150, 1, 0); \ + __ret_150; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_lane_s32(__p0_151, __p1_151, __p2_151, __p3_151) __extension__ ({ \ + int32x2_t __ret_151; \ + int32x2_t __s0_151 = __p0_151; \ + int8x8_t __s1_151 = __p1_151; \ + int8x8_t __s2_151 = __p2_151; \ +int8x8_t __reint_151 = __s2_151; \ +int32x2_t __reint1_151 = splat_lane_s32(*(int32x2_t *) &__reint_151, __p3_151); \ + __ret_151 = vdot_s32(__s0_151, __s1_151, *(int8x8_t *) &__reint1_151); \ + __ret_151; \ +}) +#else +#define vdot_lane_s32(__p0_152, __p1_152, __p2_152, __p3_152) __extension__ ({ \ + int32x2_t __ret_152; \ + int32x2_t __s0_152 = __p0_152; \ + int8x8_t __s1_152 = __p1_152; \ + int8x8_t __s2_152 = __p2_152; \ + int32x2_t __rev0_152; __rev0_152 = __builtin_shufflevector(__s0_152, __s0_152, 1, 0); \ + int8x8_t __rev1_152; __rev1_152 = __builtin_shufflevector(__s1_152, __s1_152, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_152; __rev2_152 = __builtin_shufflevector(__s2_152, __s2_152, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_152 = __rev2_152; \ +int32x2_t __reint1_152 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_152, __p3_152); \ + __ret_152 = __noswap_vdot_s32(__rev0_152, __rev1_152, *(int8x8_t *) &__reint1_152); \ + __ret_152 = __builtin_shufflevector(__ret_152, __ret_152, 1, 0); \ + __ret_152; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + 
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vbslq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vbslq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 
40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vbsl_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vbsl_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcagt_f16(float16x4_t __p0, 
float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else 
+__ai __attribute__((target("fullfp16"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("fullfp16"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + 
return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcltz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcltz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + int16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vcvt_n_s16_f16(__p0, __p1) 
__extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__p0, 17); + return __ret; +} 
+#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__p0, 1); + 
return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) 
__builtin_neon_vcvtn_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16x8_t) __builtin_neon_vextq_f16((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ + __ret; \ +}) +#else +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vextq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16x4_t) __builtin_neon_vext_f16((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ + __ret; \ +}) +#else +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vext_f16((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16"))) float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16"))) float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = vfmaq_f16(__p0, -__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t 
__ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = vfma_f16(__p0, -__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vmin_f16(float16x4_t __p0, 
float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f16(__p0_153, __p1_153, __p2_153) __extension__ ({ \ + float16x8_t __ret_153; \ + float16x8_t __s0_153 = __p0_153; \ + float16x4_t __s1_153 = __p1_153; \ + __ret_153 = __s0_153 * splatq_lane_f16(__s1_153, __p2_153); \ + __ret_153; \ +}) +#else +#define vmulq_lane_f16(__p0_154, __p1_154, __p2_154) __extension__ ({ \ + float16x8_t __ret_154; \ + float16x8_t __s0_154 = __p0_154; \ + float16x4_t __s1_154 = __p1_154; \ + float16x8_t __rev0_154; __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_154; __rev1_154 = __builtin_shufflevector(__s1_154, __s1_154, 3, 2, 1, 0); \ + __ret_154 = __rev0_154 * __noswap_splatq_lane_f16(__rev1_154, __p2_154); \ + __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_154; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f16(__p0_155, __p1_155, __p2_155) __extension__ ({ \ + float16x4_t __ret_155; \ + float16x4_t __s0_155 = __p0_155; \ + float16x4_t __s1_155 = __p1_155; \ + __ret_155 = __s0_155 * splat_lane_f16(__s1_155, __p2_155); \ + __ret_155; \ +}) +#else +#define vmul_lane_f16(__p0_156, __p1_156, __p2_156) __extension__ ({ \ + float16x4_t __ret_156; \ + float16x4_t __s0_156 = __p0_156; \ + float16x4_t __s1_156 = __p1_156; \ + float16x4_t __rev0_156; __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \ + float16x4_t __rev1_156; __rev1_156 = __builtin_shufflevector(__s1_156, __s1_156, 3, 2, 1, 0); \ + __ret_156 = __rev0_156 * __noswap_splat_lane_f16(__rev1_156, __p2_156); \ + __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \ + __ret_156; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret; \ +}) +#else +#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret; \ +}) +#else +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__rev0, 
(int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("fullfp16"))) float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vtrnq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vtrn_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vuzpq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + 
float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vuzp_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vzipq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vzip_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("i8mm"))) int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("i8mm"))) int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_lane_s32(__p0_157, __p1_157, __p2_157, __p3_157) __extension__ ({ \ + int32x4_t __ret_157; \ + int32x4_t __s0_157 = __p0_157; \ + uint8x16_t __s1_157 = __p1_157; \ + int8x8_t __s2_157 = __p2_157; \ +int8x8_t __reint_157 = __s2_157; \ + __ret_157 = vusdotq_s32(__s0_157, __s1_157, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_157, __p3_157))); \ + __ret_157; \ +}) +#else +#define vusdotq_lane_s32(__p0_158, __p1_158, __p2_158, __p3_158) __extension__ ({ \ + int32x4_t __ret_158; \ + int32x4_t __s0_158 = __p0_158; \ + uint8x16_t __s1_158 = __p1_158; \ + int8x8_t __s2_158 = __p2_158; \ + int32x4_t __rev0_158; __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 3, 2, 1, 0); \ + uint8x16_t __rev1_158; __rev1_158 = __builtin_shufflevector(__s1_158, __s1_158, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_158; __rev2_158 = __builtin_shufflevector(__s2_158, __s2_158, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_158 = __rev2_158; \ + __ret_158 = __noswap_vusdotq_s32(__rev0_158, __rev1_158, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_158, __p3_158))); \ + __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 3, 2, 1, 0); \ + __ret_158; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_lane_s32(__p0_159, __p1_159, __p2_159, __p3_159) __extension__ ({ \ + int32x2_t __ret_159; \ + int32x2_t __s0_159 = __p0_159; \ + uint8x8_t __s1_159 = __p1_159; \ + int8x8_t __s2_159 = __p2_159; \ +int8x8_t __reint_159 = __s2_159; \ + __ret_159 = vusdot_s32(__s0_159, __s1_159, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_159, __p3_159))); \ + __ret_159; \ +}) +#else +#define vusdot_lane_s32(__p0_160, __p1_160, __p2_160, __p3_160) __extension__ ({ \ + int32x2_t __ret_160; \ + int32x2_t __s0_160 = __p0_160; \ + uint8x8_t __s1_160 = __p1_160; \ + int8x8_t __s2_160 = __p2_160; \ + int32x2_t __rev0_160; __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 1, 0); \ + uint8x8_t __rev1_160; __rev1_160 = __builtin_shufflevector(__s1_160, __s1_160, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_160; __rev2_160 = __builtin_shufflevector(__s2_160, __s2_160, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_160 = __rev2_160; \ + __ret_160 = __noswap_vusdot_s32(__rev0_160, __rev1_160, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_160, __p3_160))); \ + __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 1, 0); \ + __ret_160; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("i8mm"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, 
int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else 
+__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s32(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ + int32x4_t __ret_161; \ + int32x4_t __s0_161 = __p0_161; \ + int32x4_t __s1_161 = __p1_161; \ + int32x2_t __s2_161 = __p2_161; \ + __ret_161 = vqrdmlahq_s32(__s0_161, __s1_161, splatq_lane_s32(__s2_161, __p3_161)); \ + __ret_161; \ +}) +#else +#define vqrdmlahq_lane_s32(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ + int32x4_t __ret_162; \ + int32x4_t __s0_162 = __p0_162; \ + int32x4_t __s1_162 = __p1_162; \ + int32x2_t __s2_162 = __p2_162; \ + int32x4_t __rev0_162; __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 3, 2, 1, 0); \ + int32x4_t __rev1_162; __rev1_162 = __builtin_shufflevector(__s1_162, __s1_162, 3, 2, 1, 0); \ + int32x2_t __rev2_162; __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 1, 0); \ + __ret_162 = __noswap_vqrdmlahq_s32(__rev0_162, __rev1_162, __noswap_splatq_lane_s32(__rev2_162, __p3_162)); \ + __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 3, 2, 1, 0); \ + __ret_162; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ + int16x8_t __ret_163; \ + int16x8_t __s0_163 = __p0_163; \ + int16x8_t __s1_163 = __p1_163; \ + int16x4_t __s2_163 = __p2_163; \ + __ret_163 = vqrdmlahq_s16(__s0_163, __s1_163, splatq_lane_s16(__s2_163, __p3_163)); \ + __ret_163; \ +}) +#else +#define vqrdmlahq_lane_s16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \ + int16x8_t __ret_164; \ + int16x8_t __s0_164 = __p0_164; \ + int16x8_t __s1_164 = __p1_164; \ + int16x4_t __s2_164 = __p2_164; \ + int16x8_t __rev0_164; __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_164; __rev1_164 = __builtin_shufflevector(__s1_164, __s1_164, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_164; __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \ + __ret_164 = __noswap_vqrdmlahq_s16(__rev0_164, __rev1_164, __noswap_splatq_lane_s16(__rev2_164, __p3_164)); \ + __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_164; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s32(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ + int32x2_t __ret_165; \ + int32x2_t __s0_165 = __p0_165; \ + int32x2_t __s1_165 = __p1_165; \ + int32x2_t __s2_165 = __p2_165; \ + __ret_165 = vqrdmlah_s32(__s0_165, __s1_165, splat_lane_s32(__s2_165, __p3_165)); \ + __ret_165; \ +}) +#else +#define vqrdmlah_lane_s32(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ + int32x2_t __ret_166; \ + int32x2_t __s0_166 = __p0_166; \ + int32x2_t __s1_166 = __p1_166; \ + 
int32x2_t __s2_166 = __p2_166; \ + int32x2_t __rev0_166; __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 1, 0); \ + int32x2_t __rev1_166; __rev1_166 = __builtin_shufflevector(__s1_166, __s1_166, 1, 0); \ + int32x2_t __rev2_166; __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 1, 0); \ + __ret_166 = __noswap_vqrdmlah_s32(__rev0_166, __rev1_166, __noswap_splat_lane_s32(__rev2_166, __p3_166)); \ + __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 1, 0); \ + __ret_166; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ + int16x4_t __ret_167; \ + int16x4_t __s0_167 = __p0_167; \ + int16x4_t __s1_167 = __p1_167; \ + int16x4_t __s2_167 = __p2_167; \ + __ret_167 = vqrdmlah_s16(__s0_167, __s1_167, splat_lane_s16(__s2_167, __p3_167)); \ + __ret_167; \ +}) +#else +#define vqrdmlah_lane_s16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ + int16x4_t __ret_168; \ + int16x4_t __s0_168 = __p0_168; \ + int16x4_t __s1_168 = __p1_168; \ + int16x4_t __s2_168 = __p2_168; \ + int16x4_t __rev0_168; __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \ + int16x4_t __rev1_168; __rev1_168 = __builtin_shufflevector(__s1_168, __s1_168, 3, 2, 1, 0); \ + int16x4_t __rev2_168; __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 3, 2, 1, 0); \ + __ret_168 = __noswap_vqrdmlah_s16(__rev0_168, __rev1_168, __noswap_splat_lane_s16(__rev2_168, __p3_168)); \ + __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \ + __ret_168; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else +__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \ + int32x4_t __ret_169; \ + int32x4_t __s0_169 = __p0_169; \ + int32x4_t __s1_169 = __p1_169; \ + int32x2_t __s2_169 = __p2_169; \ + __ret_169 = vqrdmlshq_s32(__s0_169, __s1_169, splatq_lane_s32(__s2_169, __p3_169)); \ + __ret_169; \ +}) +#else +#define vqrdmlshq_lane_s32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ + int32x4_t __ret_170; \ + int32x4_t __s0_170 = __p0_170; \ + int32x4_t __s1_170 = __p1_170; \ + int32x2_t __s2_170 = __p2_170; \ + int32x4_t __rev0_170; __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 3, 2, 1, 0); \ + int32x4_t __rev1_170; __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 3, 2, 1, 0); \ + int32x2_t __rev2_170; __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \ + __ret_170 = __noswap_vqrdmlshq_s32(__rev0_170, __rev1_170, __noswap_splatq_lane_s32(__rev2_170, __p3_170)); \ + __ret_170 = 
__builtin_shufflevector(__ret_170, __ret_170, 3, 2, 1, 0); \ + __ret_170; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s16(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ + int16x8_t __ret_171; \ + int16x8_t __s0_171 = __p0_171; \ + int16x8_t __s1_171 = __p1_171; \ + int16x4_t __s2_171 = __p2_171; \ + __ret_171 = vqrdmlshq_s16(__s0_171, __s1_171, splatq_lane_s16(__s2_171, __p3_171)); \ + __ret_171; \ +}) +#else +#define vqrdmlshq_lane_s16(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ + int16x8_t __ret_172; \ + int16x8_t __s0_172 = __p0_172; \ + int16x8_t __s1_172 = __p1_172; \ + int16x4_t __s2_172 = __p2_172; \ + int16x8_t __rev0_172; __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_172; __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_172; __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 3, 2, 1, 0); \ + __ret_172 = __noswap_vqrdmlshq_s16(__rev0_172, __rev1_172, __noswap_splatq_lane_s16(__rev2_172, __p3_172)); \ + __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_172; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ + int32x2_t __ret_173; \ + int32x2_t __s0_173 = __p0_173; \ + int32x2_t __s1_173 = __p1_173; \ + int32x2_t __s2_173 = __p2_173; \ + __ret_173 = vqrdmlsh_s32(__s0_173, __s1_173, splat_lane_s32(__s2_173, __p3_173)); \ + __ret_173; \ +}) +#else +#define vqrdmlsh_lane_s32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ + int32x2_t __ret_174; \ + int32x2_t __s0_174 = __p0_174; \ + int32x2_t __s1_174 = __p1_174; \ + int32x2_t __s2_174 = __p2_174; \ + int32x2_t __rev0_174; __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \ + int32x2_t __rev1_174; __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \ + int32x2_t __rev2_174; __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 1, 0); \ + __ret_174 = __noswap_vqrdmlsh_s32(__rev0_174, __rev1_174, __noswap_splat_lane_s32(__rev2_174, __p3_174)); \ + __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \ + __ret_174; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s16(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ + int16x4_t __ret_175; \ + int16x4_t __s0_175 = __p0_175; \ + int16x4_t __s1_175 = __p1_175; \ + int16x4_t __s2_175 = __p2_175; \ + __ret_175 = vqrdmlsh_s16(__s0_175, __s1_175, splat_lane_s16(__s2_175, __p3_175)); \ + __ret_175; \ +}) +#else +#define vqrdmlsh_lane_s16(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ + int16x4_t __ret_176; \ + int16x4_t __s0_176 = __p0_176; \ + int16x4_t __s1_176 = __p1_176; \ + int16x4_t __s2_176 = __p2_176; \ + int16x4_t __rev0_176; __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \ + int16x4_t __rev1_176; __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \ + int16x4_t __rev2_176; __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \ + __ret_176 = __noswap_vqrdmlsh_s16(__rev0_176, __rev1_176, __noswap_splat_lane_s16(__rev2_176, __p3_176)); \ + __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \ + __ret_176; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__p0, 
(int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ + float32x2_t __ret_177; \ + float32x2_t __s0_177 = __p0_177; \ + float32x2_t __s1_177 = __p1_177; \ + float32x2_t __s2_177 = __p2_177; \ +float32x2_t __reint_177 = __s2_177; \ +uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \ + __ret_177 = vcmla_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \ + __ret_177; \ +}) +#else +#define vcmla_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ + float32x2_t __ret_178; \ + float32x2_t __s0_178 = __p0_178; \ + float32x2_t __s1_178 = __p1_178; \ + float32x2_t __s2_178 = __p2_178; \ + float32x2_t __rev0_178; __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \ + float32x2_t __rev1_178; __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \ + float32x2_t __rev2_178; __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \ +float32x2_t __reint_178 = __rev2_178; \ +uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \ + __ret_178 = __noswap_vcmla_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \ + __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \ + __ret_178; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ + float32x4_t __ret_179; \ + float32x4_t __s0_179 = __p0_179; \ + float32x4_t __s1_179 = __p1_179; \ + float32x2_t __s2_179 = __p2_179; \ +float32x2_t __reint_179 = __s2_179; \ +uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \ + __ret_179 = vcmlaq_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \ + __ret_179; \ +}) +#else +#define vcmlaq_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ + float32x4_t __ret_180; \ + float32x4_t __s0_180 = __p0_180; \ + float32x4_t __s1_180 = __p1_180; \ + float32x2_t __s2_180 = __p2_180; \ + 
float32x4_t __rev0_180; __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \ + float32x4_t __rev1_180; __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \ + float32x2_t __rev2_180; __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \ +float32x2_t __reint_180 = __rev2_180; \ +uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \ + __ret_180 = __noswap_vcmlaq_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \ + __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \ + __ret_180; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ + float32x2_t __ret_181; \ + float32x2_t __s0_181 = __p0_181; \ + float32x2_t __s1_181 = __p1_181; \ + float32x4_t __s2_181 = __p2_181; \ +float32x4_t __reint_181 = __s2_181; \ +uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \ + __ret_181 = vcmla_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \ + __ret_181; \ +}) +#else +#define vcmla_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ + float32x2_t __ret_182; \ + float32x2_t __s0_182 = __p0_182; \ + float32x2_t __s1_182 = __p1_182; \ + float32x4_t __s2_182 = __p2_182; \ + float32x2_t __rev0_182; __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \ + float32x2_t __rev1_182; __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \ + float32x4_t __rev2_182; __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \ +float32x4_t __reint_182 = __rev2_182; \ +uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \ + __ret_182 = __noswap_vcmla_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \ + __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \ + __ret_182; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ + float32x4_t __ret_183; \ + float32x4_t __s0_183 = __p0_183; \ + float32x4_t __s1_183 = __p1_183; \ + float32x4_t __s2_183 = __p2_183; \ +float32x4_t __reint_183 = __s2_183; \ +uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \ + __ret_183 = vcmlaq_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \ + __ret_183; \ +}) +#else +#define vcmlaq_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ + float32x4_t __ret_184; \ + float32x4_t __s0_184 = __p0_184; \ + float32x4_t __s1_184 = __p1_184; \ + float32x4_t __s2_184 = __p2_184; \ + float32x4_t __rev0_184; __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \ + float32x4_t __rev1_184; __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \ + float32x4_t __rev2_184; __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \ +float32x4_t __reint_184 = __rev2_184; \ +uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \ + __ret_184 = __noswap_vcmlaq_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \ + __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \ + __ret_184; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ + float32x2_t __ret_185; \ + float32x2_t __s0_185 = __p0_185; \ + float32x2_t __s1_185 = __p1_185; \ + float32x2_t __s2_185 = __p2_185; \ +float32x2_t __reint_185 = __s2_185; \ +uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ + __ret_185 = vcmla_rot180_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \ + __ret_185; \ +}) +#else +#define vcmla_rot180_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ + float32x2_t __ret_186; \ + float32x2_t __s0_186 = __p0_186; \ + float32x2_t __s1_186 = __p1_186; \ + float32x2_t __s2_186 = __p2_186; \ + float32x2_t __rev0_186; __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \ + float32x2_t __rev1_186; __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \ + float32x2_t __rev2_186; __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \ +float32x2_t __reint_186 = __rev2_186; \ +uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \ + __ret_186 = __noswap_vcmla_rot180_f32(__rev0_186, __rev1_186, *(float32x2_t *) 
&__reint1_186); \ + __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \ + __ret_186; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ + float32x4_t __ret_187; \ + float32x4_t __s0_187 = __p0_187; \ + float32x4_t __s1_187 = __p1_187; \ + float32x2_t __s2_187 = __p2_187; \ +float32x2_t __reint_187 = __s2_187; \ +uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \ + __ret_187 = vcmlaq_rot180_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \ + __ret_187; \ +}) +#else +#define vcmlaq_rot180_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ + float32x4_t __ret_188; \ + float32x4_t __s0_188 = __p0_188; \ + float32x4_t __s1_188 = __p1_188; \ + float32x2_t __s2_188 = __p2_188; \ + float32x4_t __rev0_188; __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \ + float32x4_t __rev1_188; __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \ + float32x2_t __rev2_188; __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \ +float32x2_t __reint_188 = __rev2_188; \ +uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \ + __ret_188 = __noswap_vcmlaq_rot180_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \ + __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \ + __ret_188; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ + float32x2_t __ret_189; \ + float32x2_t __s0_189 = __p0_189; \ + float32x2_t __s1_189 = __p1_189; \ + float32x4_t __s2_189 = __p2_189; \ +float32x4_t __reint_189 = __s2_189; \ +uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ + __ret_189 = vcmla_rot180_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \ + __ret_189; \ +}) +#else +#define vcmla_rot180_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ + float32x2_t __ret_190; \ + float32x2_t __s0_190 = __p0_190; \ + float32x2_t __s1_190 = __p1_190; \ + float32x4_t __s2_190 = __p2_190; \ + float32x2_t __rev0_190; __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \ + float32x2_t __rev1_190; __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \ + float32x4_t __rev2_190; __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \ +float32x4_t __reint_190 = __rev2_190; \ +uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \ + __ret_190 = __noswap_vcmla_rot180_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \ + __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \ + __ret_190; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ + float32x4_t __ret_191; \ + float32x4_t __s0_191 = __p0_191; \ + float32x4_t __s1_191 = __p1_191; \ + float32x4_t __s2_191 = __p2_191; \ +float32x4_t __reint_191 = __s2_191; \ +uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \ + __ret_191 = vcmlaq_rot180_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \ + __ret_191; \ +}) +#else +#define 
vcmlaq_rot180_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \ + float32x4_t __ret_192; \ + float32x4_t __s0_192 = __p0_192; \ + float32x4_t __s1_192 = __p1_192; \ + float32x4_t __s2_192 = __p2_192; \ + float32x4_t __rev0_192; __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \ + float32x4_t __rev1_192; __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \ + float32x4_t __rev2_192; __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \ +float32x4_t __reint_192 = __rev2_192; \ +uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \ + __ret_192 = __noswap_vcmlaq_rot180_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \ + __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \ + __ret_192; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \ + float32x2_t __ret_193; \ + float32x2_t __s0_193 = __p0_193; \ + float32x2_t __s1_193 = __p1_193; \ + float32x2_t __s2_193 = __p2_193; \ +float32x2_t 
__reint_193 = __s2_193; \ +uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ + __ret_193 = vcmla_rot270_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \ + __ret_193; \ +}) +#else +#define vcmla_rot270_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ + float32x2_t __ret_194; \ + float32x2_t __s0_194 = __p0_194; \ + float32x2_t __s1_194 = __p1_194; \ + float32x2_t __s2_194 = __p2_194; \ + float32x2_t __rev0_194; __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \ + float32x2_t __rev1_194; __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \ + float32x2_t __rev2_194; __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \ +float32x2_t __reint_194 = __rev2_194; \ +uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \ + __ret_194 = __noswap_vcmla_rot270_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \ + __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \ + __ret_194; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \ + float32x4_t __ret_195; \ + float32x4_t __s0_195 = __p0_195; \ + float32x4_t __s1_195 = __p1_195; \ + float32x2_t __s2_195 = __p2_195; \ +float32x2_t __reint_195 = __s2_195; \ +uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \ + __ret_195 = vcmlaq_rot270_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \ + __ret_195; \ +}) +#else +#define vcmlaq_rot270_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ + float32x4_t __ret_196; \ + float32x4_t __s0_196 = __p0_196; \ + float32x4_t __s1_196 = __p1_196; \ + float32x2_t __s2_196 = __p2_196; \ + float32x4_t __rev0_196; __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \ + float32x4_t __rev1_196; __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \ + float32x2_t __rev2_196; __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \ +float32x2_t __reint_196 = __rev2_196; \ +uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \ + __ret_196 = __noswap_vcmlaq_rot270_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \ + __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \ + __ret_196; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \ + float32x2_t __ret_197; \ + float32x2_t __s0_197 = __p0_197; \ + float32x2_t __s1_197 = __p1_197; \ + float32x4_t __s2_197 = __p2_197; \ +float32x4_t __reint_197 = __s2_197; \ +uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \ + __ret_197 = vcmla_rot270_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \ + __ret_197; \ +}) +#else +#define vcmla_rot270_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \ + float32x2_t __ret_198; \ + float32x2_t __s0_198 = __p0_198; \ + float32x2_t __s1_198 = __p1_198; \ + float32x4_t __s2_198 = __p2_198; \ + float32x2_t __rev0_198; __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \ + float32x2_t __rev1_198; __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \ + float32x4_t __rev2_198; __rev2_198 = __builtin_shufflevector(__s2_198, 
__s2_198, 3, 2, 1, 0); \ +float32x4_t __reint_198 = __rev2_198; \ +uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \ + __ret_198 = __noswap_vcmla_rot270_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \ + __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \ + __ret_198; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \ + float32x4_t __ret_199; \ + float32x4_t __s0_199 = __p0_199; \ + float32x4_t __s1_199 = __p1_199; \ + float32x4_t __s2_199 = __p2_199; \ +float32x4_t __reint_199 = __s2_199; \ +uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \ + __ret_199 = vcmlaq_rot270_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \ + __ret_199; \ +}) +#else +#define vcmlaq_rot270_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \ + float32x4_t __ret_200; \ + float32x4_t __s0_200 = __p0_200; \ + float32x4_t __s1_200 = __p1_200; \ + float32x4_t __s2_200 = __p2_200; \ + float32x4_t __rev0_200; __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \ + float32x4_t __rev1_200; __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \ + float32x4_t __rev2_200; __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \ +float32x4_t __reint_200 = __rev2_200; \ +uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \ + __ret_200 = __noswap_vcmlaq_rot270_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \ + __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \ + __ret_200; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_lane_f32(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \ + float32x2_t __ret_201; \ + float32x2_t __s0_201 = __p0_201; \ + float32x2_t __s1_201 = __p1_201; \ + float32x2_t __s2_201 = __p2_201; \ +float32x2_t __reint_201 = __s2_201; \ +uint64x1_t __reint1_201 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201)}; \ + __ret_201 = vcmla_rot90_f32(__s0_201, __s1_201, *(float32x2_t *) &__reint1_201); \ + __ret_201; \ +}) +#else +#define vcmla_rot90_lane_f32(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \ + float32x2_t __ret_202; \ + float32x2_t __s0_202 = __p0_202; \ + float32x2_t __s1_202 = __p1_202; \ + float32x2_t __s2_202 = __p2_202; \ + float32x2_t __rev0_202; __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 1, 0); \ + float32x2_t __rev1_202; __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 1, 0); \ + float32x2_t __rev2_202; __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 1, 0); \ +float32x2_t __reint_202 = __rev2_202; \ +uint64x1_t __reint1_202 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_202, __p3_202)}; \ + __ret_202 = __noswap_vcmla_rot90_f32(__rev0_202, __rev1_202, *(float32x2_t *) &__reint1_202); \ + __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 1, 0); \ + __ret_202; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f32(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \ + float32x4_t __ret_203; \ + float32x4_t __s0_203 = __p0_203; \ + float32x4_t __s1_203 = __p1_203; \ + float32x2_t __s2_203 = __p2_203; \ +float32x2_t __reint_203 = __s2_203; \ +uint64x2_t __reint1_203 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_203, __p3_203), vget_lane_u64(*(uint64x1_t *) &__reint_203, __p3_203)}; \ + __ret_203 = vcmlaq_rot90_f32(__s0_203, __s1_203, *(float32x4_t *) &__reint1_203); \ + __ret_203; \ +}) +#else +#define vcmlaq_rot90_lane_f32(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \ + float32x4_t __ret_204; \ + float32x4_t __s0_204 = __p0_204; \ + float32x4_t __s1_204 = __p1_204; \ + float32x2_t __s2_204 = __p2_204; \ + float32x4_t __rev0_204; __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 3, 2, 1, 0); \ + float32x4_t __rev1_204; __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 3, 2, 1, 0); \ + float32x2_t __rev2_204; __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 1, 0); \ +float32x2_t __reint_204 = __rev2_204; \ +uint64x2_t __reint1_204 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_204, __p3_204), vget_lane_u64(*(uint64x1_t *) &__reint_204, __p3_204)}; \ + __ret_204 = __noswap_vcmlaq_rot90_f32(__rev0_204, __rev1_204, *(float32x4_t *) &__reint1_204); \ + __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 3, 2, 1, 0); \ + __ret_204; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f32(__p0_205, 
__p1_205, __p2_205, __p3_205) __extension__ ({ \ + float32x2_t __ret_205; \ + float32x2_t __s0_205 = __p0_205; \ + float32x2_t __s1_205 = __p1_205; \ + float32x4_t __s2_205 = __p2_205; \ +float32x4_t __reint_205 = __s2_205; \ +uint64x1_t __reint1_205 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205)}; \ + __ret_205 = vcmla_rot90_f32(__s0_205, __s1_205, *(float32x2_t *) &__reint1_205); \ + __ret_205; \ +}) +#else +#define vcmla_rot90_laneq_f32(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \ + float32x2_t __ret_206; \ + float32x2_t __s0_206 = __p0_206; \ + float32x2_t __s1_206 = __p1_206; \ + float32x4_t __s2_206 = __p2_206; \ + float32x2_t __rev0_206; __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 1, 0); \ + float32x2_t __rev1_206; __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 1, 0); \ + float32x4_t __rev2_206; __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 3, 2, 1, 0); \ +float32x4_t __reint_206 = __rev2_206; \ +uint64x1_t __reint1_206 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_206, __p3_206)}; \ + __ret_206 = __noswap_vcmla_rot90_f32(__rev0_206, __rev1_206, *(float32x2_t *) &__reint1_206); \ + __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 1, 0); \ + __ret_206; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_laneq_f32(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \ + float32x4_t __ret_207; \ + float32x4_t __s0_207 = __p0_207; \ + float32x4_t __s1_207 = __p1_207; \ + float32x4_t __s2_207 = __p2_207; \ +float32x4_t __reint_207 = __s2_207; \ +uint64x2_t __reint1_207 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_207, __p3_207), vgetq_lane_u64(*(uint64x2_t *) &__reint_207, __p3_207)}; \ + __ret_207 = vcmlaq_rot90_f32(__s0_207, __s1_207, *(float32x4_t *) &__reint1_207); \ + __ret_207; \ +}) +#else +#define vcmlaq_rot90_laneq_f32(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \ + float32x4_t __ret_208; \ + float32x4_t __s0_208 = __p0_208; \ + float32x4_t __s1_208 = __p1_208; \ + float32x4_t __s2_208 = __p2_208; \ + float32x4_t __rev0_208; __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 3, 2, 1, 0); \ + float32x4_t __rev1_208; __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 3, 2, 1, 0); \ + float32x4_t __rev2_208; __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 3, 2, 1, 0); \ +float32x4_t __reint_208 = __rev2_208; \ +uint64x2_t __reint1_208 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_208, __p3_208)}; \ + __ret_208 = __noswap_vcmlaq_rot90_f32(__rev0_208, __rev1_208, *(float32x4_t *) &__reint1_208); \ + __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 3, 2, 1, 0); \ + __ret_208; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \ + float16x4_t __ret_209; \ + float16x4_t __s0_209 = __p0_209; \ + float16x4_t __s1_209 = __p1_209; \ + float16x4_t __s2_209 = __p2_209; \ +float16x4_t __reint_209 = __s2_209; \ +uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \ + __ret_209 = vcmla_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \ + __ret_209; \ +}) +#else +#define vcmla_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \ + float16x4_t __ret_210; \ + float16x4_t __s0_210 = __p0_210; \ + float16x4_t __s1_210 = __p1_210; \ + float16x4_t __s2_210 = __p2_210; \ + float16x4_t __rev0_210; __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \ + float16x4_t __rev1_210; __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \ + float16x4_t __rev2_210; __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \ +float16x4_t __reint_210 = __rev2_210; \ +uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \ + __ret_210 = __noswap_vcmla_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \ + __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \ + __ret_210; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \ + float16x8_t __ret_211; \ + float16x8_t __s0_211 = __p0_211; \ + float16x8_t __s1_211 = __p1_211; \ + float16x4_t __s2_211 = __p2_211; \ +float16x4_t __reint_211 = __s2_211; \ +uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \ + __ret_211 = vcmlaq_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \ + __ret_211; \ +}) +#else +#define vcmlaq_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \ + float16x8_t __ret_212; \ + float16x8_t __s0_212 = __p0_212; \ + float16x8_t __s1_212 = __p1_212; \ + float16x4_t __s2_212 = 
__p2_212; \ + float16x8_t __rev0_212; __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_212; __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_212; __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \ +float16x4_t __reint_212 = __rev2_212; \ +uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \ + __ret_212 = __noswap_vcmlaq_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \ + __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_212; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \ + float16x4_t __ret_213; \ + float16x4_t __s0_213 = __p0_213; \ + float16x4_t __s1_213 = __p1_213; \ + float16x8_t __s2_213 = __p2_213; \ +float16x8_t __reint_213 = __s2_213; \ +uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \ + __ret_213 = vcmla_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \ + __ret_213; \ +}) +#else +#define vcmla_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \ + float16x4_t __ret_214; \ + float16x4_t __s0_214 = __p0_214; \ + float16x4_t __s1_214 = __p1_214; \ + float16x8_t __s2_214 = __p2_214; \ + float16x4_t __rev0_214; __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \ + float16x4_t __rev1_214; __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \ + float16x8_t __rev2_214; __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_214 = __rev2_214; \ +uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \ + __ret_214 = __noswap_vcmla_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \ + __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \ + __ret_214; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \ + float16x8_t __ret_215; \ + float16x8_t __s0_215 = __p0_215; \ + float16x8_t __s1_215 = __p1_215; \ + float16x8_t __s2_215 = __p2_215; \ +float16x8_t __reint_215 = __s2_215; \ +uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \ + __ret_215 = vcmlaq_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \ + __ret_215; \ +}) +#else +#define vcmlaq_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \ + float16x8_t __ret_216; \ + float16x8_t __s0_216 = __p0_216; \ + float16x8_t __s1_216 = __p1_216; \ + float16x8_t __s2_216 = __p2_216; \ + float16x8_t __rev0_216; __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_216; __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_216; __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 
7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_216 = __rev2_216; \ +uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \ + __ret_216 = __noswap_vcmlaq_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \ + __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_216; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \ + float16x4_t __ret_217; \ + float16x4_t __s0_217 = __p0_217; \ + float16x4_t __s1_217 = __p1_217; \ + float16x4_t __s2_217 = __p2_217; \ +float16x4_t __reint_217 = __s2_217; \ +uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \ + __ret_217 = vcmla_rot180_f16(__s0_217, __s1_217, 
*(float16x4_t *) &__reint1_217); \ + __ret_217; \ +}) +#else +#define vcmla_rot180_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \ + float16x4_t __ret_218; \ + float16x4_t __s0_218 = __p0_218; \ + float16x4_t __s1_218 = __p1_218; \ + float16x4_t __s2_218 = __p2_218; \ + float16x4_t __rev0_218; __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \ + float16x4_t __rev1_218; __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \ + float16x4_t __rev2_218; __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \ +float16x4_t __reint_218 = __rev2_218; \ +uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \ + __ret_218 = __noswap_vcmla_rot180_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \ + __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \ + __ret_218; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \ + float16x8_t __ret_219; \ + float16x8_t __s0_219 = __p0_219; \ + float16x8_t __s1_219 = __p1_219; \ + float16x4_t __s2_219 = __p2_219; \ +float16x4_t __reint_219 = __s2_219; \ +uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \ + __ret_219 = vcmlaq_rot180_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \ + __ret_219; \ +}) +#else +#define vcmlaq_rot180_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \ + float16x8_t __ret_220; \ + float16x8_t __s0_220 = __p0_220; \ + float16x8_t __s1_220 = __p1_220; \ + float16x4_t __s2_220 = __p2_220; \ + float16x8_t __rev0_220; __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_220; __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_220; __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \ +float16x4_t __reint_220 = __rev2_220; \ +uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \ + __ret_220 = __noswap_vcmlaq_rot180_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \ + __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_220; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \ + float16x4_t __ret_221; \ + float16x4_t __s0_221 = __p0_221; \ + float16x4_t __s1_221 = __p1_221; \ + float16x8_t __s2_221 = __p2_221; \ +float16x8_t __reint_221 = __s2_221; \ +uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \ + __ret_221 = vcmla_rot180_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \ + __ret_221; \ +}) +#else +#define vcmla_rot180_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \ + float16x4_t __ret_222; \ + float16x4_t __s0_222 = __p0_222; \ + float16x4_t __s1_222 = __p1_222; \ + float16x8_t __s2_222 = 
__p2_222; \ + float16x4_t __rev0_222; __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \ + float16x4_t __rev1_222; __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \ + float16x8_t __rev2_222; __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_222 = __rev2_222; \ +uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \ + __ret_222 = __noswap_vcmla_rot180_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \ + __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \ + __ret_222; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \ + float16x8_t __ret_223; \ + float16x8_t __s0_223 = __p0_223; \ + float16x8_t __s1_223 = __p1_223; \ + float16x8_t __s2_223 = __p2_223; \ +float16x8_t __reint_223 = __s2_223; \ +uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \ + __ret_223 = vcmlaq_rot180_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \ + __ret_223; \ +}) +#else +#define vcmlaq_rot180_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \ + float16x8_t __ret_224; \ + float16x8_t __s0_224 = __p0_224; \ + float16x8_t __s1_224 = __p1_224; \ + float16x8_t __s2_224 = __p2_224; \ + float16x8_t __rev0_224; __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_224; __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_224; __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_224 = __rev2_224; \ +uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \ + __ret_224 = __noswap_vcmlaq_rot180_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \ + __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_224; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t 
__noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \ + float16x4_t __ret_225; \ + float16x4_t __s0_225 = __p0_225; \ + float16x4_t __s1_225 = __p1_225; \ + float16x4_t __s2_225 = __p2_225; \ +float16x4_t __reint_225 = __s2_225; \ +uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \ + __ret_225 = vcmla_rot270_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \ + __ret_225; \ +}) +#else +#define vcmla_rot270_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \ + float16x4_t __ret_226; \ + float16x4_t __s0_226 = __p0_226; \ + float16x4_t __s1_226 = __p1_226; \ + float16x4_t __s2_226 = __p2_226; \ + float16x4_t __rev0_226; __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \ + float16x4_t __rev1_226; __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \ + float16x4_t __rev2_226; __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \ +float16x4_t __reint_226 = __rev2_226; \ +uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \ + __ret_226 = __noswap_vcmla_rot270_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \ + __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \ + __ret_226; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \ + float16x8_t __ret_227; \ + float16x8_t __s0_227 = __p0_227; \ + float16x8_t __s1_227 = __p1_227; \ + float16x4_t __s2_227 = __p2_227; \ +float16x4_t __reint_227 = __s2_227; \ +uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \ + __ret_227 = vcmlaq_rot270_f16(__s0_227, __s1_227, 
*(float16x8_t *) &__reint1_227); \ + __ret_227; \ +}) +#else +#define vcmlaq_rot270_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \ + float16x8_t __ret_228; \ + float16x8_t __s0_228 = __p0_228; \ + float16x8_t __s1_228 = __p1_228; \ + float16x4_t __s2_228 = __p2_228; \ + float16x8_t __rev0_228; __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_228; __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_228; __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \ +float16x4_t __reint_228 = __rev2_228; \ +uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \ + __ret_228 = __noswap_vcmlaq_rot270_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \ + __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_228; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \ + float16x4_t __ret_229; \ + float16x4_t __s0_229 = __p0_229; \ + float16x4_t __s1_229 = __p1_229; \ + float16x8_t __s2_229 = __p2_229; \ +float16x8_t __reint_229 = __s2_229; \ +uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \ + __ret_229 = vcmla_rot270_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \ + __ret_229; \ +}) +#else +#define vcmla_rot270_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ + float16x4_t __ret_230; \ + float16x4_t __s0_230 = __p0_230; \ + float16x4_t __s1_230 = __p1_230; \ + float16x8_t __s2_230 = __p2_230; \ + float16x4_t __rev0_230; __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \ + float16x4_t __rev1_230; __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \ + float16x8_t __rev2_230; __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_230 = __rev2_230; \ +uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \ + __ret_230 = __noswap_vcmla_rot270_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \ + __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \ + __ret_230; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \ + float16x8_t __ret_231; \ + float16x8_t __s0_231 = __p0_231; \ + float16x8_t __s1_231 = __p1_231; \ + float16x8_t __s2_231 = __p2_231; \ +float16x8_t __reint_231 = __s2_231; \ +uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \ + __ret_231 = vcmlaq_rot270_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \ + __ret_231; \ +}) +#else +#define vcmlaq_rot270_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \ + float16x8_t __ret_232; \ + float16x8_t __s0_232 = __p0_232; \ + float16x8_t __s1_232 = __p1_232; \ + 
float16x8_t __s2_232 = __p2_232; \ + float16x8_t __rev0_232; __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_232; __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_232; __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_232 = __rev2_232; \ +uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \ + __ret_232 = __noswap_vcmlaq_rot270_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \ + __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_232; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_lane_f16(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \ + float16x4_t __ret_233; \ + float16x4_t __s0_233 = 
__p0_233; \ + float16x4_t __s1_233 = __p1_233; \ + float16x4_t __s2_233 = __p2_233; \ +float16x4_t __reint_233 = __s2_233; \ +uint32x2_t __reint1_233 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_233, __p3_233), vget_lane_u32(*(uint32x2_t *) &__reint_233, __p3_233)}; \ + __ret_233 = vcmla_rot90_f16(__s0_233, __s1_233, *(float16x4_t *) &__reint1_233); \ + __ret_233; \ +}) +#else +#define vcmla_rot90_lane_f16(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \ + float16x4_t __ret_234; \ + float16x4_t __s0_234 = __p0_234; \ + float16x4_t __s1_234 = __p1_234; \ + float16x4_t __s2_234 = __p2_234; \ + float16x4_t __rev0_234; __rev0_234 = __builtin_shufflevector(__s0_234, __s0_234, 3, 2, 1, 0); \ + float16x4_t __rev1_234; __rev1_234 = __builtin_shufflevector(__s1_234, __s1_234, 3, 2, 1, 0); \ + float16x4_t __rev2_234; __rev2_234 = __builtin_shufflevector(__s2_234, __s2_234, 3, 2, 1, 0); \ +float16x4_t __reint_234 = __rev2_234; \ +uint32x2_t __reint1_234 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_234, __p3_234), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_234, __p3_234)}; \ + __ret_234 = __noswap_vcmla_rot90_f16(__rev0_234, __rev1_234, *(float16x4_t *) &__reint1_234); \ + __ret_234 = __builtin_shufflevector(__ret_234, __ret_234, 3, 2, 1, 0); \ + __ret_234; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \ + float16x8_t __ret_235; \ + float16x8_t __s0_235 = __p0_235; \ + float16x8_t __s1_235 = __p1_235; \ + float16x4_t __s2_235 = __p2_235; \ +float16x4_t __reint_235 = __s2_235; \ +uint32x4_t __reint1_235 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235)}; \ + __ret_235 = vcmlaq_rot90_f16(__s0_235, __s1_235, *(float16x8_t *) &__reint1_235); \ + __ret_235; \ +}) +#else +#define vcmlaq_rot90_lane_f16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \ + float16x8_t __ret_236; \ + float16x8_t __s0_236 = __p0_236; \ + float16x8_t __s1_236 = __p1_236; \ + float16x4_t __s2_236 = __p2_236; \ + float16x8_t __rev0_236; __rev0_236 = __builtin_shufflevector(__s0_236, __s0_236, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_236; __rev1_236 = __builtin_shufflevector(__s1_236, __s1_236, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_236; __rev2_236 = __builtin_shufflevector(__s2_236, __s2_236, 3, 2, 1, 0); \ +float16x4_t __reint_236 = __rev2_236; \ +uint32x4_t __reint1_236 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236)}; \ + __ret_236 = __noswap_vcmlaq_rot90_f16(__rev0_236, __rev1_236, *(float16x8_t *) &__reint1_236); \ + __ret_236 = __builtin_shufflevector(__ret_236, __ret_236, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_236; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f16(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \ + float16x4_t __ret_237; \ + float16x4_t __s0_237 = __p0_237; \ + float16x4_t __s1_237 = __p1_237; \ + float16x8_t __s2_237 = __p2_237; \ +float16x8_t __reint_237 = __s2_237; \ +uint32x2_t __reint1_237 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_237, __p3_237), vgetq_lane_u32(*(uint32x4_t *) &__reint_237, __p3_237)}; \ + 
__ret_237 = vcmla_rot90_f16(__s0_237, __s1_237, *(float16x4_t *) &__reint1_237); \ + __ret_237; \ +}) +#else +#define vcmla_rot90_laneq_f16(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \ + float16x4_t __ret_238; \ + float16x4_t __s0_238 = __p0_238; \ + float16x4_t __s1_238 = __p1_238; \ + float16x8_t __s2_238 = __p2_238; \ + float16x4_t __rev0_238; __rev0_238 = __builtin_shufflevector(__s0_238, __s0_238, 3, 2, 1, 0); \ + float16x4_t __rev1_238; __rev1_238 = __builtin_shufflevector(__s1_238, __s1_238, 3, 2, 1, 0); \ + float16x8_t __rev2_238; __rev2_238 = __builtin_shufflevector(__s2_238, __s2_238, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_238 = __rev2_238; \ +uint32x2_t __reint1_238 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_238, __p3_238), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_238, __p3_238)}; \ + __ret_238 = __noswap_vcmla_rot90_f16(__rev0_238, __rev1_238, *(float16x4_t *) &__reint1_238); \ + __ret_238 = __builtin_shufflevector(__ret_238, __ret_238, 3, 2, 1, 0); \ + __ret_238; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_laneq_f16(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \ + float16x8_t __ret_239; \ + float16x8_t __s0_239 = __p0_239; \ + float16x8_t __s1_239 = __p1_239; \ + float16x8_t __s2_239 = __p2_239; \ +float16x8_t __reint_239 = __s2_239; \ +uint32x4_t __reint1_239 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239)}; \ + __ret_239 = vcmlaq_rot90_f16(__s0_239, __s1_239, *(float16x8_t *) &__reint1_239); \ + __ret_239; \ +}) +#else +#define vcmlaq_rot90_laneq_f16(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \ + float16x8_t __ret_240; \ + float16x8_t __s0_240 = __p0_240; \ + float16x8_t __s1_240 = __p1_240; \ + float16x8_t __s2_240 = __p2_240; \ + float16x8_t __rev0_240; __rev0_240 = __builtin_shufflevector(__s0_240, __s0_240, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_240; __rev1_240 = __builtin_shufflevector(__s1_240, __s1_240, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_240; __rev2_240 = __builtin_shufflevector(__s2_240, __s2_240, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_240 = __rev2_240; \ +uint32x4_t __reint1_240 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240)}; \ + __ret_240 = __noswap_vcmlaq_rot90_f16(__rev0_240, __rev1_240, *(float16x8_t *) &__reint1_240); \ + __ret_240 = __builtin_shufflevector(__ret_240, __ret_240, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_240; \ +}) +#endif + +#if !defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s32(__p0_241, __p1_241, __p2_241) __extension__ ({ \ + int32x4_t __ret_241; \ + int32x4_t __s0_241 = __p0_241; \ + int32x2_t __s1_241 = __p1_241; \ + __ret_241 = vqdmulhq_s32(__s0_241, splatq_lane_s32(__s1_241, __p2_241)); \ + __ret_241; \ +}) +#else +#define vqdmulhq_lane_s32(__p0_242, __p1_242, __p2_242) __extension__ ({ \ + int32x4_t __ret_242; \ + int32x4_t __s0_242 = __p0_242; \ + int32x2_t __s1_242 = __p1_242; \ + int32x4_t __rev0_242; __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 3, 2, 1, 0); \ + int32x2_t __rev1_242; __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \ + __ret_242 = 
__noswap_vqdmulhq_s32(__rev0_242, __noswap_splatq_lane_s32(__rev1_242, __p2_242)); \ + __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 3, 2, 1, 0); \ + __ret_242; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s16(__p0_243, __p1_243, __p2_243) __extension__ ({ \ + int16x8_t __ret_243; \ + int16x8_t __s0_243 = __p0_243; \ + int16x4_t __s1_243 = __p1_243; \ + __ret_243 = vqdmulhq_s16(__s0_243, splatq_lane_s16(__s1_243, __p2_243)); \ + __ret_243; \ +}) +#else +#define vqdmulhq_lane_s16(__p0_244, __p1_244, __p2_244) __extension__ ({ \ + int16x8_t __ret_244; \ + int16x8_t __s0_244 = __p0_244; \ + int16x4_t __s1_244 = __p1_244; \ + int16x8_t __rev0_244; __rev0_244 = __builtin_shufflevector(__s0_244, __s0_244, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_244; __rev1_244 = __builtin_shufflevector(__s1_244, __s1_244, 3, 2, 1, 0); \ + __ret_244 = __noswap_vqdmulhq_s16(__rev0_244, __noswap_splatq_lane_s16(__rev1_244, __p2_244)); \ + __ret_244 = __builtin_shufflevector(__ret_244, __ret_244, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_244; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s32(__p0_245, __p1_245, __p2_245) __extension__ ({ \ + int32x2_t __ret_245; \ + int32x2_t __s0_245 = __p0_245; \ + int32x2_t __s1_245 = __p1_245; \ + __ret_245 = vqdmulh_s32(__s0_245, splat_lane_s32(__s1_245, __p2_245)); \ + __ret_245; \ +}) +#else +#define vqdmulh_lane_s32(__p0_246, __p1_246, __p2_246) __extension__ ({ \ + int32x2_t __ret_246; \ + int32x2_t __s0_246 = __p0_246; \ + int32x2_t __s1_246 = __p1_246; \ + int32x2_t __rev0_246; __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \ + int32x2_t __rev1_246; __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \ + __ret_246 = __noswap_vqdmulh_s32(__rev0_246, __noswap_splat_lane_s32(__rev1_246, __p2_246)); \ + __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \ + __ret_246; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s16(__p0_247, __p1_247, __p2_247) __extension__ ({ \ + int16x4_t __ret_247; \ + int16x4_t __s0_247 = __p0_247; \ + int16x4_t __s1_247 = __p1_247; \ + __ret_247 = vqdmulh_s16(__s0_247, splat_lane_s16(__s1_247, __p2_247)); \ + __ret_247; \ +}) +#else +#define vqdmulh_lane_s16(__p0_248, __p1_248, __p2_248) __extension__ ({ \ + int16x4_t __ret_248; \ + int16x4_t __s0_248 = __p0_248; \ + int16x4_t __s1_248 = __p1_248; \ + int16x4_t __rev0_248; __rev0_248 = __builtin_shufflevector(__s0_248, __s0_248, 3, 2, 1, 0); \ + int16x4_t __rev1_248; __rev1_248 = __builtin_shufflevector(__s1_248, __s1_248, 3, 2, 1, 0); \ + __ret_248 = __noswap_vqdmulh_s16(__rev0_248, __noswap_splat_lane_s16(__rev1_248, __p2_248)); \ + __ret_248 = __builtin_shufflevector(__ret_248, __ret_248, 3, 2, 1, 0); \ + __ret_248; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s32(__p0_249, __p1_249, __p2_249) __extension__ ({ \ + int32x4_t __ret_249; \ + int32x4_t __s0_249 = __p0_249; \ + int32x2_t __s1_249 = __p1_249; \ + __ret_249 = vqrdmulhq_s32(__s0_249, splatq_lane_s32(__s1_249, __p2_249)); \ + __ret_249; \ +}) +#else +#define vqrdmulhq_lane_s32(__p0_250, __p1_250, __p2_250) __extension__ ({ \ + int32x4_t __ret_250; \ + int32x4_t __s0_250 = __p0_250; \ + int32x2_t __s1_250 = __p1_250; \ + int32x4_t __rev0_250; __rev0_250 = __builtin_shufflevector(__s0_250, __s0_250, 3, 2, 1, 0); \ + int32x2_t __rev1_250; __rev1_250 = __builtin_shufflevector(__s1_250, __s1_250, 1, 0); \ + __ret_250 = __noswap_vqrdmulhq_s32(__rev0_250, __noswap_splatq_lane_s32(__rev1_250, __p2_250)); \ + __ret_250 = 
__builtin_shufflevector(__ret_250, __ret_250, 3, 2, 1, 0); \ + __ret_250; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s16(__p0_251, __p1_251, __p2_251) __extension__ ({ \ + int16x8_t __ret_251; \ + int16x8_t __s0_251 = __p0_251; \ + int16x4_t __s1_251 = __p1_251; \ + __ret_251 = vqrdmulhq_s16(__s0_251, splatq_lane_s16(__s1_251, __p2_251)); \ + __ret_251; \ +}) +#else +#define vqrdmulhq_lane_s16(__p0_252, __p1_252, __p2_252) __extension__ ({ \ + int16x8_t __ret_252; \ + int16x8_t __s0_252 = __p0_252; \ + int16x4_t __s1_252 = __p1_252; \ + int16x8_t __rev0_252; __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_252; __rev1_252 = __builtin_shufflevector(__s1_252, __s1_252, 3, 2, 1, 0); \ + __ret_252 = __noswap_vqrdmulhq_s16(__rev0_252, __noswap_splatq_lane_s16(__rev1_252, __p2_252)); \ + __ret_252 = __builtin_shufflevector(__ret_252, __ret_252, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_252; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s32(__p0_253, __p1_253, __p2_253) __extension__ ({ \ + int32x2_t __ret_253; \ + int32x2_t __s0_253 = __p0_253; \ + int32x2_t __s1_253 = __p1_253; \ + __ret_253 = vqrdmulh_s32(__s0_253, splat_lane_s32(__s1_253, __p2_253)); \ + __ret_253; \ +}) +#else +#define vqrdmulh_lane_s32(__p0_254, __p1_254, __p2_254) __extension__ ({ \ + int32x2_t __ret_254; \ + int32x2_t __s0_254 = __p0_254; \ + int32x2_t __s1_254 = __p1_254; \ + int32x2_t __rev0_254; __rev0_254 = __builtin_shufflevector(__s0_254, __s0_254, 1, 0); \ + int32x2_t __rev1_254; __rev1_254 = __builtin_shufflevector(__s1_254, __s1_254, 1, 0); \ + __ret_254 = __noswap_vqrdmulh_s32(__rev0_254, __noswap_splat_lane_s32(__rev1_254, __p2_254)); \ + __ret_254 = __builtin_shufflevector(__ret_254, __ret_254, 1, 0); \ + __ret_254; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s16(__p0_255, __p1_255, __p2_255) __extension__ ({ \ + int16x4_t __ret_255; \ + int16x4_t __s0_255 = __p0_255; \ + int16x4_t __s1_255 = __p1_255; \ + __ret_255 = vqrdmulh_s16(__s0_255, splat_lane_s16(__s1_255, __p2_255)); \ + __ret_255; \ +}) +#else +#define vqrdmulh_lane_s16(__p0_256, __p1_256, __p2_256) __extension__ ({ \ + int16x4_t __ret_256; \ + int16x4_t __s0_256 = __p0_256; \ + int16x4_t __s1_256 = __p1_256; \ + int16x4_t __rev0_256; __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 3, 2, 1, 0); \ + int16x4_t __rev1_256; __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 3, 2, 1, 0); \ + __ret_256 = __noswap_vqrdmulh_s16(__rev0_256, __noswap_splat_lane_s16(__rev1_256, __p2_256)); \ + __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 3, 2, 1, 0); \ + __ret_256; \ +}) #endif __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { @@ -33828,6 +39286,336 @@ __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { __ret = (int16x4_t)(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__rev0, 11); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t 
__noswap___a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = __a32_vcvt_bf16_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap___a32_vcvt_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("bf16"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("bf16"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} #endif #if (__ARM_FP & 2) #ifdef __LITTLE_ENDIAN__ @@ -35034,74 +40822,234 @@ __ai uint32x2_t 
vcvtp_u32_f32(float32x2_t __p0) { } #endif -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_AES) #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("aes"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("aes"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("aes"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("aes"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { +__ai __attribute__((target("aes"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48); + __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { +__ai __attribute__((target("aes"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48); + __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) { +__ai __attribute__((target("aes"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48); + __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t 
vaesmcq_u8(uint8x16_t __p0) { +__ai __attribute__((target("aes"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48); + __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("sha2"))) uint32_t vsha1h_u32(uint32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1su0q_u32((int8x16_t)__rev0, 
(int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256hq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256hq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vsha256su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256su1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + #endif #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) #ifdef __LITTLE_ENDIAN__ @@ -35333,195 +41281,193 @@ __ai float32x2_t vrndx_f32(float32x2_t __p0) { } #endif -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndq_f16(float16x8_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40); + __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai float16x8_t vrndq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40); + __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrnd_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrnd_f16(float16x4_t __p0) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8); + __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai float16x4_t vrnd_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrnd_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8); + __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndaq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndaq_f16(float16x8_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40); + __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai float16x8_t vrndaq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndaq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40); + __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrnda_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t 
vrnda_f16(float16x4_t __p0) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8); + __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai float16x4_t vrnda_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrnda_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8); + __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndmq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndmq_f16(float16x8_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40); + __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai float16x8_t vrndmq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndmq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40); + __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndm_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrndm_f16(float16x4_t __p0) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8); + __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai float16x4_t vrndm_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrndm_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8); + __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndnq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndnq_f16(float16x8_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40); + __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai float16x8_t vrndnq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndnq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40); + __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndn_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrndn_f16(float16x4_t __p0) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8); + __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai float16x4_t vrndn_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrndn_f16(float16x4_t 
__p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8); + __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndpq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndpq_f16(float16x8_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40); + __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai float16x8_t vrndpq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndpq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40); + __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndp_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrndp_f16(float16x4_t __p0) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8); + __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai float16x4_t vrndp_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrndp_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8); + __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndxq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndxq_f16(float16x8_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40); + __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai float16x8_t vrndxq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x8_t vrndxq_f16(float16x8_t __p0) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40); + __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__rev0, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndx_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrndx_f16(float16x4_t __p0) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8); + __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai float16x4_t vrndx_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16"))) float16x4_t vrndx_f16(float16x4_t __p0) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8); + __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } @@ -35597,7867 
+41543,74 @@ __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { } #endif -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("fullfp16"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("fullfp16"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("fullfp16"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("fullfp16"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("fullfp16"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); + __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("fullfp16"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("fullfp16"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { 
float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); + __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("fullfp16"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA2) -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2); - return __ret; -} -#else -__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -__ai uint32_t vsha1h_u32(uint32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2); - return __ret; -} -#else -__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2); - return __ret; -} -#else -__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = 
(uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); - return __ret; -} -#else -__ai uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); - return __ret; -} -#else -__ai uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); - return __ret; -} -#else -__ai uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); 
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); - return __ret; -} -#else -__ai int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); - return __ret; -} -#else -__ai int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int64x2_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); - return __ret; -} -#else -__ai int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vbcaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 
6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); - return __ret; -} -#else -__ai uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); - return __ret; -} -#else -__ai uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (uint64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); - return __ret; -} -#else -__ai uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = 
(int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); - return __ret; -} -#else -__ai int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); - return __ret; -} -#else -__ai int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int64x2_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); - return __ret; -} -#else -__ai int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_veor3q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - 
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vrax1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) -#else -#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vxarq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); - return __ret; -} -#else -__ai uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vsha512hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); - return __ret; -} -#else -__ai uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vsha512h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vsha512su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); - return __ret; -} -#else -__ai 
uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vsha512su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ - __ret; \ -}) -#else -#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x4_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ - __ret; \ -}) -#else -#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ - __ret; \ -}) -#else -#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ - __ret; \ -}) -#else -#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) 
__builtin_neon_vsm4eq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsm4eq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) 
__builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t 
vreinterpret_p8_u16(uint16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { - poly16x4_t __ret; - __ret = 
(poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} 
-__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t 
vreinterpretq_p16_f32(float32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t 
vreinterpretq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t 
vreinterpretq_u16_f32(float32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_f32(float32x4_t 
__p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { - float16x8_t __ret; - __ret = 
(float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t 
vreinterpretq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; 
-} -__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s16(int16x4_t 
__p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t 
vreinterpret_f64_p16(poly16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { - float16x4_t __ret; - __ret = 
(float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t 
vreinterpret_s64_u16(uint16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif -#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vrnd_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndaq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndaq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) 
__builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vrnda_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndiq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndiq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vrndi_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndmq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndmq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vrndm_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndnq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndnq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vrndn_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndpq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndpq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vrndp_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndxq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndxq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vrndx_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); - return __ret; -} -#endif -#if __ARM_ARCH >= 
8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrnd32xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrnd32x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd32zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrnd32zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd32z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrnd32z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64xq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrnd64xq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64x_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrnd64x_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrnd64zq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  __ret = (float32x4_t) __builtin_neon_vrnd64zq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd64z_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  __ret = (float32x2_t) __builtin_neon_vrnd64z_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t 
vreinterpretq_u16_bf16(bfloat16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t 
__p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -#endif -#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__) -__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} 
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t 
vreinterpretq_bf16_f64(float64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -#endif -#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) -#ifdef __LITTLE_ENDIAN__ -#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) -#else -#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) -#else -#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) -#else -#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) -#else -#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \ - float32x4_t __ret_142; \ - float32x4_t __s0_142 = __p0_142; \ - bfloat16x8_t __s1_142 = __p1_142; \ - bfloat16x4_t __s2_142 = __p2_142; \ -bfloat16x4_t __reint_142 = __s2_142; \ -float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \ - __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \ - __ret_142; \ -}) -#else -#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \ - float32x4_t __ret_143; \ - float32x4_t __s0_143 = __p0_143; \ - bfloat16x8_t __s1_143 = __p1_143; \ - bfloat16x4_t __s2_143 = __p2_143; \ - float32x4_t __rev0_143; __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_143; __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_143; __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \ -bfloat16x4_t __reint_143 = __rev2_143; \ -float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \ - __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \ - __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \ - __ret_143; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \ - float32x2_t __ret_144; \ - float32x2_t __s0_144 = __p0_144; \ - bfloat16x4_t __s1_144 = __p1_144; \ - bfloat16x4_t __s2_144 = __p2_144; \ -bfloat16x4_t __reint_144 = __s2_144; \ -float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \ - __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \ - __ret_144; \ -}) -#else -#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \ - float32x2_t __ret_145; \ - float32x2_t __s0_145 = __p0_145; \ - bfloat16x4_t __s1_145 = __p1_145; \ - bfloat16x4_t __s2_145 = __p2_145; \ - float32x2_t __rev0_145; __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \ - bfloat16x4_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_145; __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \ -bfloat16x4_t __reint_145 = __rev2_145; \ -float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) 
&__reint_145, __p3_145); \ - __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \ - __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \ - __ret_145; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \ - float32x4_t __ret_146; \ - float32x4_t __s0_146 = __p0_146; \ - bfloat16x8_t __s1_146 = __p1_146; \ - bfloat16x8_t __s2_146 = __p2_146; \ -bfloat16x8_t __reint_146 = __s2_146; \ -float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \ - __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \ - __ret_146; \ -}) -#else -#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \ - float32x4_t __ret_147; \ - float32x4_t __s0_147 = __p0_147; \ - bfloat16x8_t __s1_147 = __p1_147; \ - bfloat16x8_t __s2_147 = __p2_147; \ - float32x4_t __rev0_147; __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_147; __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \ -bfloat16x8_t __reint_147 = __rev2_147; \ -float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \ - __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \ - __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \ - __ret_147; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \ - float32x2_t __ret_148; \ - float32x2_t __s0_148 = __p0_148; \ - bfloat16x4_t __s1_148 = __p1_148; \ - bfloat16x8_t __s2_148 = __p2_148; \ -bfloat16x8_t __reint_148 = __s2_148; \ -float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \ - __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \ - __ret_148; \ -}) -#else -#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \ - float32x2_t __ret_149; \ - float32x2_t __s0_149 = __p0_149; \ - bfloat16x4_t __s1_149 = __p1_149; \ - bfloat16x8_t __s2_149 = __p2_149; \ - float32x2_t __rev0_149; __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \ - bfloat16x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_149; __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \ -bfloat16x8_t __reint_149 = __rev2_149; \ -float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \ - __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \ - __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \ - __ret_149; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, 
__p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { - bfloat16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#else -__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { - bfloat16x8_t __ret; - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { - bfloat16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#endif - -#define vcreate_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (bfloat16x4_t)(__promote); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) { - float32x4_t __ret_150; -bfloat16x4_t 
__reint_150 = __p0_150; -int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16); - __ret_150 = *(float32x4_t *) &__reint1_150; - return __ret_150; -} -#else -__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) { - float32x4_t __ret_151; - bfloat16x4_t __rev0_151; __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0); -bfloat16x4_t __reint_151 = __rev0_151; -int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16); - __ret_151 = *(float32x4_t *) &__reint1_151; - __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0); - return __ret_151; -} -__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) { - float32x4_t __ret_152; -bfloat16x4_t __reint_152 = __p0_152; -int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16); - __ret_152 = *(float32x4_t *) &__reint1_152; - return __ret_152; -} -#endif - -__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) { - float32_t __ret; -bfloat16_t __reint = __p0; -int32_t __reint1 = *(int32_t *) &__reint << 16; - __ret = *(float32_t *) &__reint1; - return __ret; -} -__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) { - bfloat16_t __ret; - __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \ - bfloat16x8_t __ret_153; \ - bfloat16x4_t __s0_153 = __p0_153; \ - __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \ - __ret_153; \ -}) -#else -#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \ - bfloat16x8_t __ret_154; \ - bfloat16x4_t __s0_154 = __p0_154; \ - bfloat16x4_t __rev0_154; __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \ - __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \ - __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_154; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \ - bfloat16x4_t __ret_155; \ - bfloat16x4_t __s0_155 = __p0_155; \ - __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \ - __ret_155; \ -}) -#else -#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \ - bfloat16x4_t __ret_156; \ - bfloat16x4_t __s0_156 = __p0_156; \ - bfloat16x4_t __rev0_156; __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \ - __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \ - __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \ - __ret_156; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16_t) 
__builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \ - bfloat16x8_t __ret_157; \ - bfloat16x8_t __s0_157 = __p0_157; \ - __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \ - __ret_157; \ -}) -#else -#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \ - bfloat16x8_t __ret_158; \ - bfloat16x8_t __s0_158 = __p0_158; \ - bfloat16x8_t __rev0_158; __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \ - __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_158; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \ - bfloat16x4_t __ret_159; \ - bfloat16x8_t __s0_159 = __p0_159; \ - __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \ - __ret_159; \ -}) -#else -#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \ - bfloat16x4_t __ret_160; \ - bfloat16x8_t __s0_160 = __p0_160; \ - bfloat16x8_t __rev0_160; __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \ - __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \ - __ret_160; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#else -__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t 
__s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); - return __ret; -} -#else -__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \ - __ret; \ -}) -#else -#define vld1q_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \ - __ret; \ -}) -#else -#define vld1_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \ - __ret; \ -}) -#else -#define vld1q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \ - __ret; \ -}) -#else -#define vld1_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s1 = __p1; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \ - __ret; \ -}) -#else -#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s1 = __p1; \ - 
bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s1 = __p1; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \ - __ret; \ -}) -#else -#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s1 = __p1; \ - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16_x2(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld1q_bf16_x2(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_bf16_x2(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld1_bf16_x2(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16_x3(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld1q_bf16_x3(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_bf16_x3(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld1_bf16_x3(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16_x4(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld1q_bf16_x4(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_bf16_x4(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld1_bf16_x4(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld2q_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld2_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld2q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld2_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - bfloat16x8x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ - __ret; \ -}) -#else -#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - bfloat16x8x2_t __s1 = __p1; \ - bfloat16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - bfloat16x4x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ - __ret; \ -}) -#else -#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - bfloat16x4x2_t __s1 = __p1; \ - bfloat16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld3q_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld3_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld3q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld3_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - bfloat16x8x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ - __ret; \ -}) -#else -#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - bfloat16x8x3_t __s1 = __p1; \ - bfloat16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - bfloat16x4x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ - __ret; \ -}) -#else -#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - bfloat16x4x3_t __s1 = __p1; \ - bfloat16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld4q_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld4_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 
0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \ - __ret; \ -}) -#else -#define vld4q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \ - __ret; \ -}) -#else -#define vld4_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - bfloat16x8x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ - __ret; \ -}) -#else -#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - bfloat16x8x4_t __s1 = __p1; \ - bfloat16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - bfloat16x4x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ - __ret; \ -}) -#else -#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - bfloat16x4x4_t __s1 = __p1; \ - bfloat16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x8_t __s1 = __p1; \ - __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x8_t __s1 = __p1; \ - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x8_t __s1 = __p1; \ - __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x4_t __s1 = __p1; \ - __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x4_t __s1 = __p1; \ - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x4_t __s1 = __p1; \ - __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \ -}) -#else -#define vst1q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __s1 = __p1; \ - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \ -}) -#else -#define vst1_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __s1 = __p1; \ - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \ -}) -#else -#define vst1q_lane_bf16(__p0, 
__p1, __p2) __extension__ ({ \ - bfloat16x8_t __s1 = __p1; \ - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \ -}) -#else -#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __s1 = __p1; \ - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ -}) -#else -#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - bfloat16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ -}) -#else -#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - bfloat16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ -}) -#else -#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - bfloat16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ -}) -#else -#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - bfloat16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ -}) -#else -#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - bfloat16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ -}) -#else -#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - bfloat16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ -}) -#else -#define vst2q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - bfloat16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ -}) -#else -#define vst2_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - bfloat16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ -}) -#else -#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - bfloat16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ 
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ -}) -#else -#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - bfloat16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ -}) -#else -#define vst3q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - bfloat16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ -}) -#else -#define vst3_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - bfloat16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ -}) -#else -#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - bfloat16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ -}) -#else -#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - bfloat16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vst4q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ -}) -#else -#define vst4q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - bfloat16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ -}) -#else -#define vst4_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - bfloat16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ -}) -#else -#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - bfloat16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ -}) -#else -#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - bfloat16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ -}) -#endif - -#endif -#if 
defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11); - return __ret; -} -#else -__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - __ret = __a32_vcvt_bf16_f32(__p0); - return __ret; -} -#else -__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap___a32_vcvt_bf16_f32(__rev0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0)); - return __ret; -} -#else -__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0)); - return __ret; -} -#else -__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43); - return __ret; -} -#else -__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ - bfloat16x8_t __ret_161; \ - bfloat16x8_t __s0_161 = 
__p0_161; \ - bfloat16x4_t __s2_161 = __p2_161; \ - __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \ - __ret_161; \ -}) -#else -#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ - bfloat16x8_t __ret_162; \ - bfloat16x8_t __s0_162 = __p0_162; \ - bfloat16x4_t __s2_162 = __p2_162; \ - bfloat16x8_t __rev0_162; __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_162; __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \ - __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \ - __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_162; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ - bfloat16x4_t __ret_163; \ - bfloat16x4_t __s0_163 = __p0_163; \ - bfloat16x4_t __s2_163 = __p2_163; \ - __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \ - __ret_163; \ -}) -#else -#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \ - bfloat16x4_t __ret_164; \ - bfloat16x4_t __s0_164 = __p0_164; \ - bfloat16x4_t __s2_164 = __p2_164; \ - bfloat16x4_t __rev0_164; __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_164; __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \ - __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \ - __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \ - __ret_164; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ - bfloat16x8_t __ret_165; \ - bfloat16x8_t __s0_165 = __p0_165; \ - bfloat16x8_t __s2_165 = __p2_165; \ - __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \ - __ret_165; \ -}) -#else -#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ - bfloat16x8_t __ret_166; \ - bfloat16x8_t __s0_166 = __p0_166; \ - bfloat16x8_t __s2_166 = __p2_166; \ - bfloat16x8_t __rev0_166; __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_166; __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \ - __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_166; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ - bfloat16x4_t __ret_167; \ - bfloat16x4_t __s0_167 = __p0_167; \ - bfloat16x8_t __s2_167 = __p2_167; \ - __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \ - __ret_167; \ -}) -#else -#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ - bfloat16x4_t __ret_168; \ - bfloat16x4_t __s0_168 = __p0_168; \ - bfloat16x8_t __s2_168 = __p2_168; \ - bfloat16x4_t __rev0_168; __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_168; __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \ - __ret_168 = 
__builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \ - __ret_168; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); - return __ret; -} -#else -__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43); - return __ret; -} -#else -__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = __a64_vcvtq_low_bf16_f32(__p0); - return __ret; -} -#else -__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_COMPLEX) -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_lane_f32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \ - float32x2_t __ret_169; \ - float32x2_t __s0_169 = __p0_169; \ - float32x2_t __s1_169 = __p1_169; \ - float32x2_t __s2_169 = __p2_169; \ -float32x2_t __reint_169 = __s2_169; \ -uint64x1_t __reint1_169 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_169, __p3_169)}; \ - __ret_169 = vcmla_f32(__s0_169, __s1_169, *(float32x2_t *) &__reint1_169); \ - __ret_169; \ -}) -#else -#define 
vcmla_lane_f32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ - float32x2_t __ret_170; \ - float32x2_t __s0_170 = __p0_170; \ - float32x2_t __s1_170 = __p1_170; \ - float32x2_t __s2_170 = __p2_170; \ - float32x2_t __rev0_170; __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 1, 0); \ - float32x2_t __rev1_170; __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 1, 0); \ - float32x2_t __rev2_170; __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \ -float32x2_t __reint_170 = __rev2_170; \ -uint64x1_t __reint1_170 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_170, __p3_170)}; \ - __ret_170 = __noswap_vcmla_f32(__rev0_170, __rev1_170, *(float32x2_t *) &__reint1_170); \ - __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 1, 0); \ - __ret_170; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_lane_f32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ - float32x4_t __ret_171; \ - float32x4_t __s0_171 = __p0_171; \ - float32x4_t __s1_171 = __p1_171; \ - float32x2_t __s2_171 = __p2_171; \ -float32x2_t __reint_171 = __s2_171; \ -uint64x2_t __reint1_171 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171), vget_lane_u64(*(uint64x1_t *) &__reint_171, __p3_171)}; \ - __ret_171 = vcmlaq_f32(__s0_171, __s1_171, *(float32x4_t *) &__reint1_171); \ - __ret_171; \ -}) -#else -#define vcmlaq_lane_f32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ - float32x4_t __ret_172; \ - float32x4_t __s0_172 = __p0_172; \ - float32x4_t __s1_172 = __p1_172; \ - float32x2_t __s2_172 = __p2_172; \ - float32x4_t __rev0_172; __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \ - float32x4_t __rev1_172; __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 3, 2, 1, 0); \ - float32x2_t __rev2_172; __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 1, 0); \ -float32x2_t __reint_172 = __rev2_172; \ -uint64x2_t __reint1_172 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172), vget_lane_u64(*(uint64x1_t *) &__reint_172, __p3_172)}; \ - __ret_172 = __noswap_vcmlaq_f32(__rev0_172, __rev1_172, *(float32x4_t *) &__reint1_172); \ - __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \ - __ret_172; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_laneq_f32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ - float32x2_t __ret_173; \ - float32x2_t __s0_173 = __p0_173; \ - float32x2_t __s1_173 = __p1_173; \ - float32x4_t __s2_173 = __p2_173; \ -float32x4_t __reint_173 = __s2_173; \ -uint64x1_t __reint1_173 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_173, __p3_173)}; \ - __ret_173 = vcmla_f32(__s0_173, __s1_173, *(float32x2_t *) &__reint1_173); \ - __ret_173; \ -}) -#else -#define vcmla_laneq_f32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ - float32x2_t __ret_174; \ - float32x2_t __s0_174 = __p0_174; \ - float32x2_t __s1_174 = __p1_174; \ - float32x4_t __s2_174 = __p2_174; \ - float32x2_t __rev0_174; __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \ - float32x2_t __rev1_174; __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \ - float32x4_t __rev2_174; __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 3, 2, 1, 0); \ -float32x4_t __reint_174 = __rev2_174; \ -uint64x1_t __reint1_174 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_174, __p3_174)}; \ - __ret_174 = __noswap_vcmla_f32(__rev0_174, __rev1_174, *(float32x2_t *) &__reint1_174); \ - __ret_174 = __builtin_shufflevector(__ret_174, 
__ret_174, 1, 0); \ - __ret_174; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_laneq_f32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ - float32x4_t __ret_175; \ - float32x4_t __s0_175 = __p0_175; \ - float32x4_t __s1_175 = __p1_175; \ - float32x4_t __s2_175 = __p2_175; \ -float32x4_t __reint_175 = __s2_175; \ -uint64x2_t __reint1_175 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175), vgetq_lane_u64(*(uint64x2_t *) &__reint_175, __p3_175)}; \ - __ret_175 = vcmlaq_f32(__s0_175, __s1_175, *(float32x4_t *) &__reint1_175); \ - __ret_175; \ -}) -#else -#define vcmlaq_laneq_f32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ - float32x4_t __ret_176; \ - float32x4_t __s0_176 = __p0_176; \ - float32x4_t __s1_176 = __p1_176; \ - float32x4_t __s2_176 = __p2_176; \ - float32x4_t __rev0_176; __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \ - float32x4_t __rev1_176; __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \ - float32x4_t __rev2_176; __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \ -float32x4_t __reint_176 = __rev2_176; \ -uint64x2_t __reint1_176 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_176, __p3_176)}; \ - __ret_176 = __noswap_vcmlaq_f32(__rev0_176, __rev1_176, *(float32x4_t *) &__reint1_176); \ - __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \ - __ret_176; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) 
__builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ - float32x2_t __ret_177; \ - float32x2_t __s0_177 = __p0_177; \ - float32x2_t __s1_177 = __p1_177; \ - float32x2_t __s2_177 = __p2_177; \ -float32x2_t __reint_177 = __s2_177; \ -uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \ - __ret_177 = vcmla_rot180_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \ - __ret_177; \ -}) -#else -#define vcmla_rot180_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ - float32x2_t __ret_178; \ - float32x2_t __s0_178 = __p0_178; \ - float32x2_t __s1_178 = __p1_178; \ - float32x2_t __s2_178 = __p2_178; \ - float32x2_t __rev0_178; __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \ - float32x2_t __rev1_178; __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \ - float32x2_t __rev2_178; __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \ -float32x2_t __reint_178 = __rev2_178; \ -uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \ - __ret_178 = __noswap_vcmla_rot180_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \ - __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \ - __ret_178; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ - float32x4_t __ret_179; \ - float32x4_t __s0_179 = __p0_179; \ - float32x4_t __s1_179 = __p1_179; \ - float32x2_t __s2_179 = __p2_179; \ -float32x2_t __reint_179 = __s2_179; \ -uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \ - __ret_179 = vcmlaq_rot180_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \ - __ret_179; \ -}) -#else -#define vcmlaq_rot180_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ - float32x4_t __ret_180; \ - float32x4_t __s0_180 = __p0_180; \ - float32x4_t __s1_180 = __p1_180; \ - float32x2_t __s2_180 = __p2_180; \ - float32x4_t __rev0_180; __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \ - float32x4_t __rev1_180; __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \ - float32x2_t __rev2_180; __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \ -float32x2_t __reint_180 = __rev2_180; \ -uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \ - __ret_180 = __noswap_vcmlaq_rot180_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \ - __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \ - __ret_180; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ - float32x2_t __ret_181; \ - float32x2_t __s0_181 = __p0_181; \ - float32x2_t __s1_181 = __p1_181; \ - float32x4_t __s2_181 = __p2_181; \ -float32x4_t __reint_181 = __s2_181; \ -uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \ - __ret_181 = vcmla_rot180_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \ - __ret_181; \ -}) -#else -#define vcmla_rot180_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ - float32x2_t __ret_182; 
\ - float32x2_t __s0_182 = __p0_182; \ - float32x2_t __s1_182 = __p1_182; \ - float32x4_t __s2_182 = __p2_182; \ - float32x2_t __rev0_182; __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \ - float32x2_t __rev1_182; __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \ - float32x4_t __rev2_182; __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \ -float32x4_t __reint_182 = __rev2_182; \ -uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \ - __ret_182 = __noswap_vcmla_rot180_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \ - __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \ - __ret_182; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ - float32x4_t __ret_183; \ - float32x4_t __s0_183 = __p0_183; \ - float32x4_t __s1_183 = __p1_183; \ - float32x4_t __s2_183 = __p2_183; \ -float32x4_t __reint_183 = __s2_183; \ -uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \ - __ret_183 = vcmlaq_rot180_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \ - __ret_183; \ -}) -#else -#define vcmlaq_rot180_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ - float32x4_t __ret_184; \ - float32x4_t __s0_184 = __p0_184; \ - float32x4_t __s1_184 = __p1_184; \ - float32x4_t __s2_184 = __p2_184; \ - float32x4_t __rev0_184; __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \ - float32x4_t __rev1_184; __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \ - float32x4_t __rev2_184; __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \ -float32x4_t __reint_184 = __rev2_184; \ -uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \ - __ret_184 = __noswap_vcmlaq_rot180_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \ - __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \ - __ret_184; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, 
(int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ - float32x2_t __ret_185; \ - float32x2_t __s0_185 = __p0_185; \ - float32x2_t __s1_185 = __p1_185; \ - float32x2_t __s2_185 = __p2_185; \ -float32x2_t __reint_185 = __s2_185; \ -uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ - __ret_185 = vcmla_rot270_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \ - __ret_185; \ -}) -#else -#define vcmla_rot270_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ - float32x2_t __ret_186; \ - float32x2_t __s0_186 = __p0_186; \ - float32x2_t __s1_186 = __p1_186; \ - float32x2_t __s2_186 = __p2_186; \ - float32x2_t __rev0_186; __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 0); \ - float32x2_t __rev1_186; __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \ - float32x2_t __rev2_186; __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \ -float32x2_t __reint_186 = __rev2_186; \ -uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \ - __ret_186 = __noswap_vcmla_rot270_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \ - __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \ - __ret_186; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ - float32x4_t __ret_187; \ - float32x4_t __s0_187 = __p0_187; \ - float32x4_t __s1_187 = __p1_187; \ - float32x2_t __s2_187 = __p2_187; \ -float32x2_t __reint_187 = __s2_187; \ -uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \ - __ret_187 = vcmlaq_rot270_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \ - __ret_187; \ -}) -#else -#define vcmlaq_rot270_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ - float32x4_t __ret_188; \ - float32x4_t __s0_188 = __p0_188; \ - float32x4_t __s1_188 = __p1_188; \ - float32x2_t __s2_188 = __p2_188; \ - float32x4_t __rev0_188; __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \ - float32x4_t __rev1_188; __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \ - float32x2_t __rev2_188; __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \ -float32x2_t __reint_188 = __rev2_188; \ -uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \ - __ret_188 = __noswap_vcmlaq_rot270_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); 
\ - __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \ - __ret_188; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ - float32x2_t __ret_189; \ - float32x2_t __s0_189 = __p0_189; \ - float32x2_t __s1_189 = __p1_189; \ - float32x4_t __s2_189 = __p2_189; \ -float32x4_t __reint_189 = __s2_189; \ -uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ - __ret_189 = vcmla_rot270_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \ - __ret_189; \ -}) -#else -#define vcmla_rot270_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ - float32x2_t __ret_190; \ - float32x2_t __s0_190 = __p0_190; \ - float32x2_t __s1_190 = __p1_190; \ - float32x4_t __s2_190 = __p2_190; \ - float32x2_t __rev0_190; __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \ - float32x2_t __rev1_190; __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \ - float32x4_t __rev2_190; __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \ -float32x4_t __reint_190 = __rev2_190; \ -uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \ - __ret_190 = __noswap_vcmla_rot270_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \ - __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \ - __ret_190; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ - float32x4_t __ret_191; \ - float32x4_t __s0_191 = __p0_191; \ - float32x4_t __s1_191 = __p1_191; \ - float32x4_t __s2_191 = __p2_191; \ -float32x4_t __reint_191 = __s2_191; \ -uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \ - __ret_191 = vcmlaq_rot270_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \ - __ret_191; \ -}) -#else -#define vcmlaq_rot270_laneq_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \ - float32x4_t __ret_192; \ - float32x4_t __s0_192 = __p0_192; \ - float32x4_t __s1_192 = __p1_192; \ - float32x4_t __s2_192 = __p2_192; \ - float32x4_t __rev0_192; __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \ - float32x4_t __rev1_192; __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \ - float32x4_t __rev2_192; __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \ -float32x4_t __reint_192 = __rev2_192; \ -uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \ - __ret_192 = __noswap_vcmlaq_rot270_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \ - __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \ - __ret_192; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \ - float32x2_t __ret_193; \ - float32x2_t __s0_193 = __p0_193; \ - float32x2_t __s1_193 = __p1_193; \ - float32x2_t __s2_193 = __p2_193; \ -float32x2_t __reint_193 = __s2_193; \ -uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ - __ret_193 = vcmla_rot90_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \ - __ret_193; \ -}) -#else -#define vcmla_rot90_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ - float32x2_t __ret_194; \ - float32x2_t __s0_194 = __p0_194; \ - float32x2_t __s1_194 = __p1_194; \ - float32x2_t __s2_194 = __p2_194; \ - float32x2_t __rev0_194; __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \ - float32x2_t __rev1_194; __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \ - float32x2_t __rev2_194; __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \ -float32x2_t __reint_194 = __rev2_194; \ -uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \ - __ret_194 = __noswap_vcmla_rot90_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \ - __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \ - __ret_194; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \ - float32x4_t __ret_195; \ - float32x4_t __s0_195 = __p0_195; \ - float32x4_t __s1_195 = __p1_195; \ - float32x2_t __s2_195 = __p2_195; \ -float32x2_t __reint_195 = __s2_195; \ -uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \ - __ret_195 = vcmlaq_rot90_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \ - __ret_195; \ -}) -#else -#define vcmlaq_rot90_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ - float32x4_t 
__ret_196; \ - float32x4_t __s0_196 = __p0_196; \ - float32x4_t __s1_196 = __p1_196; \ - float32x2_t __s2_196 = __p2_196; \ - float32x4_t __rev0_196; __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \ - float32x4_t __rev1_196; __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \ - float32x2_t __rev2_196; __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \ -float32x2_t __reint_196 = __rev2_196; \ -uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \ - __ret_196 = __noswap_vcmlaq_rot90_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \ - __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \ - __ret_196; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \ - float32x2_t __ret_197; \ - float32x2_t __s0_197 = __p0_197; \ - float32x2_t __s1_197 = __p1_197; \ - float32x4_t __s2_197 = __p2_197; \ -float32x4_t __reint_197 = __s2_197; \ -uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \ - __ret_197 = vcmla_rot90_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \ - __ret_197; \ -}) -#else -#define vcmla_rot90_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \ - float32x2_t __ret_198; \ - float32x2_t __s0_198 = __p0_198; \ - float32x2_t __s1_198 = __p1_198; \ - float32x4_t __s2_198 = __p2_198; \ - float32x2_t __rev0_198; __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \ - float32x2_t __rev1_198; __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \ - float32x4_t __rev2_198; __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \ -float32x4_t __reint_198 = __rev2_198; \ -uint64x1_t __reint1_198 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \ - __ret_198 = __noswap_vcmla_rot90_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \ - __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \ - __ret_198; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \ - float32x4_t __ret_199; \ - float32x4_t __s0_199 = __p0_199; \ - float32x4_t __s1_199 = __p1_199; \ - float32x4_t __s2_199 = __p2_199; \ -float32x4_t __reint_199 = __s2_199; \ -uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \ - __ret_199 = vcmlaq_rot90_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \ - __ret_199; \ -}) -#else -#define vcmlaq_rot90_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \ - float32x4_t __ret_200; \ - float32x4_t __s0_200 = __p0_200; \ - float32x4_t __s1_200 = __p1_200; \ - float32x4_t __s2_200 = __p2_200; \ - float32x4_t __rev0_200; __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \ - float32x4_t __rev1_200; __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \ - float32x4_t __rev2_200; __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \ -float32x4_t __reint_200 = __rev2_200; \ -uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \ - __ret_200 = __noswap_vcmlaq_rot90_f32(__rev0_200, __rev1_200, *(float32x4_t *) 
&__reint1_200); \ - __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \ - __ret_200; \ -}) -#endif - -#endif -#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#else -__ai float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - 
float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#else -__ai float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_lane_f16(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \ - float16x4_t __ret_201; \ - float16x4_t __s0_201 = __p0_201; \ - float16x4_t __s1_201 = __p1_201; \ - float16x4_t __s2_201 = __p2_201; \ -float16x4_t __reint_201 = __s2_201; \ -uint32x2_t __reint1_201 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201), vget_lane_u32(*(uint32x2_t *) &__reint_201, __p3_201)}; \ - __ret_201 = vcmla_f16(__s0_201, __s1_201, *(float16x4_t *) &__reint1_201); \ - __ret_201; \ -}) -#else -#define vcmla_lane_f16(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \ - float16x4_t __ret_202; \ - float16x4_t __s0_202 = __p0_202; \ - float16x4_t __s1_202 = __p1_202; \ - float16x4_t __s2_202 = __p2_202; \ - float16x4_t __rev0_202; __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 3, 2, 1, 0); \ - float16x4_t __rev1_202; __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 3, 2, 1, 0); \ - float16x4_t __rev2_202; __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 3, 2, 1, 0); \ -float16x4_t __reint_202 = __rev2_202; \ -uint32x2_t __reint1_202 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_202, __p3_202)}; \ - __ret_202 = __noswap_vcmla_f16(__rev0_202, __rev1_202, *(float16x4_t *) &__reint1_202); \ - __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 3, 2, 1, 0); \ - __ret_202; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_lane_f16(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \ - float16x8_t __ret_203; \ - float16x8_t __s0_203 = __p0_203; \ - float16x8_t __s1_203 = __p1_203; \ - float16x4_t __s2_203 = __p2_203; \ -float16x4_t __reint_203 = __s2_203; \ -uint32x4_t __reint1_203 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203), 
vget_lane_u32(*(uint32x2_t *) &__reint_203, __p3_203)}; \ - __ret_203 = vcmlaq_f16(__s0_203, __s1_203, *(float16x8_t *) &__reint1_203); \ - __ret_203; \ -}) -#else -#define vcmlaq_lane_f16(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \ - float16x8_t __ret_204; \ - float16x8_t __s0_204 = __p0_204; \ - float16x8_t __s1_204 = __p1_204; \ - float16x4_t __s2_204 = __p2_204; \ - float16x8_t __rev0_204; __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_204; __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_204; __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 3, 2, 1, 0); \ -float16x4_t __reint_204 = __rev2_204; \ -uint32x4_t __reint1_204 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_204, __p3_204)}; \ - __ret_204 = __noswap_vcmlaq_f16(__rev0_204, __rev1_204, *(float16x8_t *) &__reint1_204); \ - __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_204; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_laneq_f16(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \ - float16x4_t __ret_205; \ - float16x4_t __s0_205 = __p0_205; \ - float16x4_t __s1_205 = __p1_205; \ - float16x8_t __s2_205 = __p2_205; \ -float16x8_t __reint_205 = __s2_205; \ -uint32x2_t __reint1_205 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205), vgetq_lane_u32(*(uint32x4_t *) &__reint_205, __p3_205)}; \ - __ret_205 = vcmla_f16(__s0_205, __s1_205, *(float16x4_t *) &__reint1_205); \ - __ret_205; \ -}) -#else -#define vcmla_laneq_f16(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \ - float16x4_t __ret_206; \ - float16x4_t __s0_206 = __p0_206; \ - float16x4_t __s1_206 = __p1_206; \ - float16x8_t __s2_206 = __p2_206; \ - float16x4_t __rev0_206; __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 3, 2, 1, 0); \ - float16x4_t __rev1_206; __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \ - float16x8_t __rev2_206; __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_206 = __rev2_206; \ -uint32x2_t __reint1_206 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_206, __p3_206)}; \ - __ret_206 = __noswap_vcmla_f16(__rev0_206, __rev1_206, *(float16x4_t *) &__reint1_206); \ - __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 3, 2, 1, 0); \ - __ret_206; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_laneq_f16(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \ - float16x8_t __ret_207; \ - float16x8_t __s0_207 = __p0_207; \ - float16x8_t __s1_207 = __p1_207; \ - float16x8_t __s2_207 = __p2_207; \ -float16x8_t __reint_207 = __s2_207; \ -uint32x4_t __reint1_207 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207), vgetq_lane_u32(*(uint32x4_t *) &__reint_207, __p3_207)}; \ - __ret_207 = vcmlaq_f16(__s0_207, __s1_207, *(float16x8_t *) &__reint1_207); \ - __ret_207; \ -}) -#else -#define vcmlaq_laneq_f16(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \ - float16x8_t __ret_208; \ - float16x8_t __s0_208 = __p0_208; \ - 
float16x8_t __s1_208 = __p1_208; \ - float16x8_t __s2_208 = __p2_208; \ - float16x8_t __rev0_208; __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_208; __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_208; __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_208 = __rev2_208; \ -uint32x4_t __reint1_208 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_208, __p3_208)}; \ - __ret_208 = __noswap_vcmlaq_f16(__rev0_208, __rev1_208, *(float16x8_t *) &__reint1_208); \ - __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_208; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#else -__ai float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#else -__ai float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \ - float16x4_t __ret_209; \ - float16x4_t __s0_209 = __p0_209; \ - float16x4_t __s1_209 = __p1_209; \ - float16x4_t __s2_209 = __p2_209; \ -float16x4_t __reint_209 = __s2_209; \ -uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), 
vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \ - __ret_209 = vcmla_rot180_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \ - __ret_209; \ -}) -#else -#define vcmla_rot180_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \ - float16x4_t __ret_210; \ - float16x4_t __s0_210 = __p0_210; \ - float16x4_t __s1_210 = __p1_210; \ - float16x4_t __s2_210 = __p2_210; \ - float16x4_t __rev0_210; __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \ - float16x4_t __rev1_210; __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \ - float16x4_t __rev2_210; __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \ -float16x4_t __reint_210 = __rev2_210; \ -uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \ - __ret_210 = __noswap_vcmla_rot180_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \ - __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \ - __ret_210; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \ - float16x8_t __ret_211; \ - float16x8_t __s0_211 = __p0_211; \ - float16x8_t __s1_211 = __p1_211; \ - float16x4_t __s2_211 = __p2_211; \ -float16x4_t __reint_211 = __s2_211; \ -uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \ - __ret_211 = vcmlaq_rot180_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \ - __ret_211; \ -}) -#else -#define vcmlaq_rot180_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \ - float16x8_t __ret_212; \ - float16x8_t __s0_212 = __p0_212; \ - float16x8_t __s1_212 = __p1_212; \ - float16x4_t __s2_212 = __p2_212; \ - float16x8_t __rev0_212; __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_212; __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_212; __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \ -float16x4_t __reint_212 = __rev2_212; \ -uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \ - __ret_212 = __noswap_vcmlaq_rot180_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \ - __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_212; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \ - float16x4_t __ret_213; \ - float16x4_t __s0_213 = __p0_213; \ - float16x4_t __s1_213 = __p1_213; \ - float16x8_t __s2_213 = __p2_213; \ -float16x8_t __reint_213 = __s2_213; \ -uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \ - __ret_213 = vcmla_rot180_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \ - __ret_213; \ -}) -#else -#define vcmla_rot180_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \ - float16x4_t 
__ret_214; \ - float16x4_t __s0_214 = __p0_214; \ - float16x4_t __s1_214 = __p1_214; \ - float16x8_t __s2_214 = __p2_214; \ - float16x4_t __rev0_214; __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \ - float16x4_t __rev1_214; __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \ - float16x8_t __rev2_214; __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_214 = __rev2_214; \ -uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \ - __ret_214 = __noswap_vcmla_rot180_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \ - __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \ - __ret_214; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \ - float16x8_t __ret_215; \ - float16x8_t __s0_215 = __p0_215; \ - float16x8_t __s1_215 = __p1_215; \ - float16x8_t __s2_215 = __p2_215; \ -float16x8_t __reint_215 = __s2_215; \ -uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \ - __ret_215 = vcmlaq_rot180_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \ - __ret_215; \ -}) -#else -#define vcmlaq_rot180_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \ - float16x8_t __ret_216; \ - float16x8_t __s0_216 = __p0_216; \ - float16x8_t __s1_216 = __p1_216; \ - float16x8_t __s2_216 = __p2_216; \ - float16x8_t __rev0_216; __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_216; __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_216; __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_216 = __rev2_216; \ -uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \ - __ret_216 = __noswap_vcmlaq_rot180_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \ - __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_216; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#else -__ai float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai float16x8_t 
__noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#else -__ai float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \ - float16x4_t __ret_217; \ - float16x4_t __s0_217 = __p0_217; \ - float16x4_t __s1_217 = __p1_217; \ - float16x4_t __s2_217 = __p2_217; \ -float16x4_t __reint_217 = __s2_217; \ -uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \ - __ret_217 = vcmla_rot270_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \ - __ret_217; \ -}) -#else -#define vcmla_rot270_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \ - float16x4_t __ret_218; \ - float16x4_t __s0_218 = __p0_218; \ - float16x4_t __s1_218 = __p1_218; \ - float16x4_t __s2_218 = __p2_218; \ - float16x4_t __rev0_218; __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \ - float16x4_t __rev1_218; __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \ - float16x4_t __rev2_218; __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \ -float16x4_t __reint_218 = __rev2_218; \ -uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \ - __ret_218 = __noswap_vcmla_rot270_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \ - __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \ - __ret_218; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \ - float16x8_t __ret_219; \ - float16x8_t __s0_219 = __p0_219; \ - float16x8_t __s1_219 = __p1_219; \ - float16x4_t __s2_219 = __p2_219; \ -float16x4_t __reint_219 = __s2_219; \ -uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \ - __ret_219 = vcmlaq_rot270_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \ - __ret_219; \ -}) -#else -#define vcmlaq_rot270_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) 
__extension__ ({ \ - float16x8_t __ret_220; \ - float16x8_t __s0_220 = __p0_220; \ - float16x8_t __s1_220 = __p1_220; \ - float16x4_t __s2_220 = __p2_220; \ - float16x8_t __rev0_220; __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_220; __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_220; __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \ -float16x4_t __reint_220 = __rev2_220; \ -uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \ - __ret_220 = __noswap_vcmlaq_rot270_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \ - __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_220; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \ - float16x4_t __ret_221; \ - float16x4_t __s0_221 = __p0_221; \ - float16x4_t __s1_221 = __p1_221; \ - float16x8_t __s2_221 = __p2_221; \ -float16x8_t __reint_221 = __s2_221; \ -uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \ - __ret_221 = vcmla_rot270_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \ - __ret_221; \ -}) -#else -#define vcmla_rot270_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \ - float16x4_t __ret_222; \ - float16x4_t __s0_222 = __p0_222; \ - float16x4_t __s1_222 = __p1_222; \ - float16x8_t __s2_222 = __p2_222; \ - float16x4_t __rev0_222; __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \ - float16x4_t __rev1_222; __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \ - float16x8_t __rev2_222; __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_222 = __rev2_222; \ -uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \ - __ret_222 = __noswap_vcmla_rot270_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \ - __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \ - __ret_222; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \ - float16x8_t __ret_223; \ - float16x8_t __s0_223 = __p0_223; \ - float16x8_t __s1_223 = __p1_223; \ - float16x8_t __s2_223 = __p2_223; \ -float16x8_t __reint_223 = __s2_223; \ -uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \ - __ret_223 = vcmlaq_rot270_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \ - __ret_223; \ -}) -#else -#define vcmlaq_rot270_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \ - float16x8_t __ret_224; \ - float16x8_t __s0_224 = __p0_224; \ - float16x8_t __s1_224 = __p1_224; \ - float16x8_t __s2_224 = __p2_224; \ - float16x8_t __rev0_224; __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 
1, 0); \ - float16x8_t __rev1_224; __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_224; __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_224 = __rev2_224; \ -uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \ - __ret_224 = __noswap_vcmlaq_rot270_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \ - __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_224; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#else -__ai float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#else -__ai float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \ - float16x4_t __ret_225; \ - float16x4_t __s0_225 = __p0_225; \ - float16x4_t __s1_225 = __p1_225; \ - float16x4_t __s2_225 = __p2_225; \ -float16x4_t __reint_225 = __s2_225; \ -uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \ - __ret_225 = vcmla_rot90_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \ - __ret_225; \ -}) -#else -#define 
vcmla_rot90_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \ - float16x4_t __ret_226; \ - float16x4_t __s0_226 = __p0_226; \ - float16x4_t __s1_226 = __p1_226; \ - float16x4_t __s2_226 = __p2_226; \ - float16x4_t __rev0_226; __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \ - float16x4_t __rev1_226; __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \ - float16x4_t __rev2_226; __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \ -float16x4_t __reint_226 = __rev2_226; \ -uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \ - __ret_226 = __noswap_vcmla_rot90_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \ - __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \ - __ret_226; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \ - float16x8_t __ret_227; \ - float16x8_t __s0_227 = __p0_227; \ - float16x8_t __s1_227 = __p1_227; \ - float16x4_t __s2_227 = __p2_227; \ -float16x4_t __reint_227 = __s2_227; \ -uint32x4_t __reint1_227 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \ - __ret_227 = vcmlaq_rot90_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \ - __ret_227; \ -}) -#else -#define vcmlaq_rot90_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \ - float16x8_t __ret_228; \ - float16x8_t __s0_228 = __p0_228; \ - float16x8_t __s1_228 = __p1_228; \ - float16x4_t __s2_228 = __p2_228; \ - float16x8_t __rev0_228; __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_228; __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_228; __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \ -float16x4_t __reint_228 = __rev2_228; \ -uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \ - __ret_228 = __noswap_vcmlaq_rot90_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \ - __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_228; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \ - float16x4_t __ret_229; \ - float16x4_t __s0_229 = __p0_229; \ - float16x4_t __s1_229 = __p1_229; \ - float16x8_t __s2_229 = __p2_229; \ -float16x8_t __reint_229 = __s2_229; \ -uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \ - __ret_229 = vcmla_rot90_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \ - __ret_229; \ -}) -#else -#define vcmla_rot90_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ - float16x4_t __ret_230; \ - float16x4_t __s0_230 = __p0_230; \ - float16x4_t __s1_230 = __p1_230; \ - float16x8_t __s2_230 = __p2_230; \ - float16x4_t __rev0_230; __rev0_230 = 
__builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \ - float16x4_t __rev1_230; __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \ - float16x8_t __rev2_230; __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_230 = __rev2_230; \ -uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \ - __ret_230 = __noswap_vcmla_rot90_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \ - __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \ - __ret_230; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \ - float16x8_t __ret_231; \ - float16x8_t __s0_231 = __p0_231; \ - float16x8_t __s1_231 = __p1_231; \ - float16x8_t __s2_231 = __p2_231; \ -float16x8_t __reint_231 = __s2_231; \ -uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \ - __ret_231 = vcmlaq_rot90_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \ - __ret_231; \ -}) -#else -#define vcmlaq_rot90_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \ - float16x8_t __ret_232; \ - float16x8_t __s0_232 = __p0_232; \ - float16x8_t __s1_232 = __p1_232; \ - float16x8_t __s2_232 = __p2_232; \ - float16x8_t __rev0_232; __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_232; __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_232; __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_232 = __rev2_232; \ -uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \ - __ret_232 = __noswap_vcmlaq_rot90_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \ - __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_232; \ -}) -#endif - -#endif -#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - 
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#else -__ai float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#endif - -__ai float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcmla_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -#define vcmla_lane_f64(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \ - float64x1_t __ret_233; \ - float64x1_t __s0_233 = __p0_233; \ - float64x1_t __s1_233 = __p1_233; \ - float64x1_t __s2_233 = __p2_233; \ -float64x1_t __reint_233 = __s2_233; \ -uint64x2_t __reint1_233 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233), vgetq_lane_u64(*(uint64x2_t *) &__reint_233, __p3_233)}; \ - __ret_233 = vcmla_f64(__s0_233, __s1_233, *(float64x1_t *) &__reint1_233); \ - __ret_233; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_lane_f64(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \ - float64x2_t __ret_234; \ - float64x2_t __s0_234 = __p0_234; \ - float64x2_t __s1_234 = __p1_234; \ - float64x1_t __s2_234 = __p2_234; \ -float64x1_t __reint_234 = __s2_234; \ -uint64x2_t __reint1_234 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234), vgetq_lane_u64(*(uint64x2_t *) &__reint_234, __p3_234)}; \ - __ret_234 = vcmlaq_f64(__s0_234, __s1_234, *(float64x2_t *) &__reint1_234); \ - __ret_234; \ -}) -#else -#define vcmlaq_lane_f64(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \ - float64x2_t __ret_235; \ - float64x2_t __s0_235 = __p0_235; \ - float64x2_t __s1_235 = __p1_235; \ - float64x1_t __s2_235 = __p2_235; \ - float64x2_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 1, 0); \ - float64x2_t __rev1_235; __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 1, 0); \ -float64x1_t __reint_235 = __s2_235; \ -uint64x2_t __reint1_235 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_235, __p3_235)}; \ - __ret_235 = __noswap_vcmlaq_f64(__rev0_235, __rev1_235, *(float64x2_t *) &__reint1_235); \ - __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 1, 0); \ - __ret_235; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_laneq_f64(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ 
\ - float64x1_t __ret_236; \ - float64x1_t __s0_236 = __p0_236; \ - float64x1_t __s1_236 = __p1_236; \ - float64x2_t __s2_236 = __p2_236; \ -float64x2_t __reint_236 = __s2_236; \ -uint64x2_t __reint1_236 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236), vgetq_lane_u64(*(uint64x2_t *) &__reint_236, __p3_236)}; \ - __ret_236 = vcmla_f64(__s0_236, __s1_236, *(float64x1_t *) &__reint1_236); \ - __ret_236; \ -}) -#else -#define vcmla_laneq_f64(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \ - float64x1_t __ret_237; \ - float64x1_t __s0_237 = __p0_237; \ - float64x1_t __s1_237 = __p1_237; \ - float64x2_t __s2_237 = __p2_237; \ - float64x2_t __rev2_237; __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 1, 0); \ -float64x2_t __reint_237 = __rev2_237; \ -uint64x2_t __reint1_237 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_237, __p3_237)}; \ - __ret_237 = vcmla_f64(__s0_237, __s1_237, *(float64x1_t *) &__reint1_237); \ - __ret_237; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_laneq_f64(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \ - float64x2_t __ret_238; \ - float64x2_t __s0_238 = __p0_238; \ - float64x2_t __s1_238 = __p1_238; \ - float64x2_t __s2_238 = __p2_238; \ -float64x2_t __reint_238 = __s2_238; \ -uint64x2_t __reint1_238 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238), vgetq_lane_u64(*(uint64x2_t *) &__reint_238, __p3_238)}; \ - __ret_238 = vcmlaq_f64(__s0_238, __s1_238, *(float64x2_t *) &__reint1_238); \ - __ret_238; \ -}) -#else -#define vcmlaq_laneq_f64(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \ - float64x2_t __ret_239; \ - float64x2_t __s0_239 = __p0_239; \ - float64x2_t __s1_239 = __p1_239; \ - float64x2_t __s2_239 = __p2_239; \ - float64x2_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \ - float64x2_t __rev1_239; __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \ - float64x2_t __rev2_239; __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 1, 0); \ -float64x2_t __reint_239 = __rev2_239; \ -uint64x2_t __reint1_239 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_239, __p3_239)}; \ - __ret_239 = __noswap_vcmlaq_f64(__rev0_239, __rev1_239, *(float64x2_t *) &__reint1_239); \ - __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \ - __ret_239; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#else -__ai float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_v((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 42); - return __ret; -} -#endif - -__ai float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcmla_rot180_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -#define vcmla_rot180_lane_f64(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \ - float64x1_t __ret_240; \ - float64x1_t __s0_240 = __p0_240; \ - float64x1_t __s1_240 = __p1_240; \ - float64x1_t __s2_240 = __p2_240; \ -float64x1_t __reint_240 = __s2_240; \ -uint64x2_t __reint1_240 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240), vgetq_lane_u64(*(uint64x2_t *) &__reint_240, __p3_240)}; \ - __ret_240 = vcmla_rot180_f64(__s0_240, __s1_240, *(float64x1_t *) &__reint1_240); \ - __ret_240; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_lane_f64(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \ - float64x2_t __ret_241; \ - float64x2_t __s0_241 = __p0_241; \ - float64x2_t __s1_241 = __p1_241; \ - float64x1_t __s2_241 = __p2_241; \ -float64x1_t __reint_241 = __s2_241; \ -uint64x2_t __reint1_241 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241), vgetq_lane_u64(*(uint64x2_t *) &__reint_241, __p3_241)}; \ - __ret_241 = vcmlaq_rot180_f64(__s0_241, __s1_241, *(float64x2_t *) &__reint1_241); \ - __ret_241; \ -}) -#else -#define vcmlaq_rot180_lane_f64(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \ - float64x2_t __ret_242; \ - float64x2_t __s0_242 = __p0_242; \ - float64x2_t __s1_242 = __p1_242; \ - float64x1_t __s2_242 = __p2_242; \ - float64x2_t __rev0_242; __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 1, 0); \ - float64x2_t __rev1_242; __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \ -float64x1_t __reint_242 = __s2_242; \ -uint64x2_t __reint1_242 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_242, __p3_242)}; \ - __ret_242 = __noswap_vcmlaq_rot180_f64(__rev0_242, __rev1_242, *(float64x2_t *) &__reint1_242); \ - __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 1, 0); \ - __ret_242; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_laneq_f64(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \ - float64x1_t __ret_243; \ - float64x1_t __s0_243 = __p0_243; \ - float64x1_t __s1_243 = __p1_243; \ - float64x2_t __s2_243 = __p2_243; \ -float64x2_t __reint_243 = __s2_243; \ -uint64x2_t __reint1_243 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243), vgetq_lane_u64(*(uint64x2_t *) &__reint_243, __p3_243)}; \ - __ret_243 = vcmla_rot180_f64(__s0_243, __s1_243, *(float64x1_t *) &__reint1_243); \ - __ret_243; \ -}) -#else -#define vcmla_rot180_laneq_f64(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \ - float64x1_t __ret_244; \ - float64x1_t __s0_244 = __p0_244; \ - float64x1_t __s1_244 = __p1_244; \ - float64x2_t __s2_244 = __p2_244; \ - float64x2_t __rev2_244; __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 1, 0); \ -float64x2_t __reint_244 = __rev2_244; \ -uint64x2_t __reint1_244 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_244, __p3_244)}; \ - __ret_244 = vcmla_rot180_f64(__s0_244, __s1_244, *(float64x1_t *) &__reint1_244); \ - __ret_244; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_laneq_f64(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \ - float64x2_t 
__ret_245; \ - float64x2_t __s0_245 = __p0_245; \ - float64x2_t __s1_245 = __p1_245; \ - float64x2_t __s2_245 = __p2_245; \ -float64x2_t __reint_245 = __s2_245; \ -uint64x2_t __reint1_245 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245), vgetq_lane_u64(*(uint64x2_t *) &__reint_245, __p3_245)}; \ - __ret_245 = vcmlaq_rot180_f64(__s0_245, __s1_245, *(float64x2_t *) &__reint1_245); \ - __ret_245; \ -}) -#else -#define vcmlaq_rot180_laneq_f64(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \ - float64x2_t __ret_246; \ - float64x2_t __s0_246 = __p0_246; \ - float64x2_t __s1_246 = __p1_246; \ - float64x2_t __s2_246 = __p2_246; \ - float64x2_t __rev0_246; __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \ - float64x2_t __rev1_246; __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \ - float64x2_t __rev2_246; __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 1, 0); \ -float64x2_t __reint_246 = __rev2_246; \ -uint64x2_t __reint1_246 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_246, __p3_246)}; \ - __ret_246 = __noswap_vcmlaq_rot180_f64(__rev0_246, __rev1_246, *(float64x2_t *) &__reint1_246); \ - __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \ - __ret_246; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#else -__ai float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#endif - -__ai float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcmla_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -#define vcmla_rot270_lane_f64(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \ - float64x1_t __ret_247; \ - float64x1_t __s0_247 = __p0_247; \ - float64x1_t __s1_247 = __p1_247; \ - float64x1_t __s2_247 = __p2_247; \ -float64x1_t __reint_247 = __s2_247; \ -uint64x2_t __reint1_247 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247), vgetq_lane_u64(*(uint64x2_t *) &__reint_247, __p3_247)}; \ - __ret_247 = vcmla_rot270_f64(__s0_247, __s1_247, *(float64x1_t *) &__reint1_247); \ - __ret_247; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_lane_f64(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \ - float64x2_t __ret_248; \ - float64x2_t __s0_248 = __p0_248; \ - float64x2_t __s1_248 = __p1_248; \ - float64x1_t __s2_248 = __p2_248; \ -float64x1_t __reint_248 = __s2_248; \ -uint64x2_t __reint1_248 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) 
&__reint_248, __p3_248), vgetq_lane_u64(*(uint64x2_t *) &__reint_248, __p3_248)}; \ - __ret_248 = vcmlaq_rot270_f64(__s0_248, __s1_248, *(float64x2_t *) &__reint1_248); \ - __ret_248; \ -}) -#else -#define vcmlaq_rot270_lane_f64(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \ - float64x2_t __ret_249; \ - float64x2_t __s0_249 = __p0_249; \ - float64x2_t __s1_249 = __p1_249; \ - float64x1_t __s2_249 = __p2_249; \ - float64x2_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \ - float64x2_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \ -float64x1_t __reint_249 = __s2_249; \ -uint64x2_t __reint1_249 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_249, __p3_249)}; \ - __ret_249 = __noswap_vcmlaq_rot270_f64(__rev0_249, __rev1_249, *(float64x2_t *) &__reint1_249); \ - __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 1, 0); \ - __ret_249; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_laneq_f64(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \ - float64x1_t __ret_250; \ - float64x1_t __s0_250 = __p0_250; \ - float64x1_t __s1_250 = __p1_250; \ - float64x2_t __s2_250 = __p2_250; \ -float64x2_t __reint_250 = __s2_250; \ -uint64x2_t __reint1_250 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250), vgetq_lane_u64(*(uint64x2_t *) &__reint_250, __p3_250)}; \ - __ret_250 = vcmla_rot270_f64(__s0_250, __s1_250, *(float64x1_t *) &__reint1_250); \ - __ret_250; \ -}) -#else -#define vcmla_rot270_laneq_f64(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \ - float64x1_t __ret_251; \ - float64x1_t __s0_251 = __p0_251; \ - float64x1_t __s1_251 = __p1_251; \ - float64x2_t __s2_251 = __p2_251; \ - float64x2_t __rev2_251; __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \ -float64x2_t __reint_251 = __rev2_251; \ -uint64x2_t __reint1_251 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_251, __p3_251)}; \ - __ret_251 = vcmla_rot270_f64(__s0_251, __s1_251, *(float64x1_t *) &__reint1_251); \ - __ret_251; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_laneq_f64(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \ - float64x2_t __ret_252; \ - float64x2_t __s0_252 = __p0_252; \ - float64x2_t __s1_252 = __p1_252; \ - float64x2_t __s2_252 = __p2_252; \ -float64x2_t __reint_252 = __s2_252; \ -uint64x2_t __reint1_252 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252), vgetq_lane_u64(*(uint64x2_t *) &__reint_252, __p3_252)}; \ - __ret_252 = vcmlaq_rot270_f64(__s0_252, __s1_252, *(float64x2_t *) &__reint1_252); \ - __ret_252; \ -}) -#else -#define vcmlaq_rot270_laneq_f64(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \ - float64x2_t __ret_253; \ - float64x2_t __s0_253 = __p0_253; \ - float64x2_t __s1_253 = __p1_253; \ - float64x2_t __s2_253 = __p2_253; \ - float64x2_t __rev0_253; __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 1, 0); \ - float64x2_t __rev1_253; __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 1, 0); \ - float64x2_t __rev2_253; __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 1, 0); \ -float64x2_t __reint_253 = __rev2_253; \ -uint64x2_t __reint1_253 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_253, __p3_253)}; \ - __ret_253 = 
__noswap_vcmlaq_rot270_f64(__rev0_253, __rev1_253, *(float64x2_t *) &__reint1_253); \ - __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 1, 0); \ - __ret_253; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#else -__ai float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#endif - -__ai float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcmla_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -#define vcmla_rot90_lane_f64(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \ - float64x1_t __ret_254; \ - float64x1_t __s0_254 = __p0_254; \ - float64x1_t __s1_254 = __p1_254; \ - float64x1_t __s2_254 = __p2_254; \ -float64x1_t __reint_254 = __s2_254; \ -uint64x2_t __reint1_254 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254), vgetq_lane_u64(*(uint64x2_t *) &__reint_254, __p3_254)}; \ - __ret_254 = vcmla_rot90_f64(__s0_254, __s1_254, *(float64x1_t *) &__reint1_254); \ - __ret_254; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_lane_f64(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \ - float64x2_t __ret_255; \ - float64x2_t __s0_255 = __p0_255; \ - float64x2_t __s1_255 = __p1_255; \ - float64x1_t __s2_255 = __p2_255; \ -float64x1_t __reint_255 = __s2_255; \ -uint64x2_t __reint1_255 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255), vgetq_lane_u64(*(uint64x2_t *) &__reint_255, __p3_255)}; \ - __ret_255 = vcmlaq_rot90_f64(__s0_255, __s1_255, *(float64x2_t *) &__reint1_255); \ - __ret_255; \ -}) -#else -#define vcmlaq_rot90_lane_f64(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \ - float64x2_t __ret_256; \ - float64x2_t __s0_256 = __p0_256; \ - float64x2_t __s1_256 = __p1_256; \ - float64x1_t __s2_256 = __p2_256; \ - float64x2_t __rev0_256; __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 1, 0); \ - float64x2_t __rev1_256; __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 1, 0); \ -float64x1_t __reint_256 = __s2_256; \ -uint64x2_t __reint1_256 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_256, __p3_256)}; \ - __ret_256 = __noswap_vcmlaq_rot90_f64(__rev0_256, __rev1_256, *(float64x2_t *) &__reint1_256); \ - __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 1, 0); \ - __ret_256; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_laneq_f64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ - float64x1_t __ret_257; \ - float64x1_t __s0_257 = 
__p0_257; \ - float64x1_t __s1_257 = __p1_257; \ - float64x2_t __s2_257 = __p2_257; \ -float64x2_t __reint_257 = __s2_257; \ -uint64x2_t __reint1_257 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257), vgetq_lane_u64(*(uint64x2_t *) &__reint_257, __p3_257)}; \ - __ret_257 = vcmla_rot90_f64(__s0_257, __s1_257, *(float64x1_t *) &__reint1_257); \ - __ret_257; \ -}) -#else -#define vcmla_rot90_laneq_f64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \ - float64x1_t __ret_258; \ - float64x1_t __s0_258 = __p0_258; \ - float64x1_t __s1_258 = __p1_258; \ - float64x2_t __s2_258 = __p2_258; \ - float64x2_t __rev2_258; __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 1, 0); \ -float64x2_t __reint_258 = __rev2_258; \ -uint64x2_t __reint1_258 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_258, __p3_258)}; \ - __ret_258 = vcmla_rot90_f64(__s0_258, __s1_258, *(float64x1_t *) &__reint1_258); \ - __ret_258; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_laneq_f64(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \ - float64x2_t __ret_259; \ - float64x2_t __s0_259 = __p0_259; \ - float64x2_t __s1_259 = __p1_259; \ - float64x2_t __s2_259 = __p2_259; \ -float64x2_t __reint_259 = __s2_259; \ -uint64x2_t __reint1_259 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259), vgetq_lane_u64(*(uint64x2_t *) &__reint_259, __p3_259)}; \ - __ret_259 = vcmlaq_rot90_f64(__s0_259, __s1_259, *(float64x2_t *) &__reint1_259); \ - __ret_259; \ -}) -#else -#define vcmlaq_rot90_laneq_f64(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \ - float64x2_t __ret_260; \ - float64x2_t __s0_260 = __p0_260; \ - float64x2_t __s1_260 = __p1_260; \ - float64x2_t __s2_260 = __p2_260; \ - float64x2_t __rev0_260; __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 1, 0); \ - float64x2_t __rev1_260; __rev1_260 = __builtin_shufflevector(__s1_260, __s1_260, 1, 0); \ - float64x2_t __rev2_260; __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 1, 0); \ -float64x2_t __reint_260 = __rev2_260; \ -uint64x2_t __reint1_260 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_260, __p3_260)}; \ - __ret_260 = __noswap_vcmlaq_rot90_f64(__rev0_260, __rev1_260, *(float64x2_t *) &__reint1_260); \ - __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 1, 0); \ - __ret_260; \ -}) -#endif - -#endif -#if defined(__ARM_FEATURE_DOTPROD) -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - __ret = 
(uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); - return __ret; -} -#else -__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#else -__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdotq_lane_u32(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \ - uint32x4_t __ret_261; \ - uint32x4_t __s0_261 = __p0_261; \ - uint8x16_t __s1_261 = __p1_261; \ - uint8x8_t __s2_261 = __p2_261; \ -uint8x8_t __reint_261 = __s2_261; \ -uint32x4_t __reint1_261 = splatq_lane_u32(*(uint32x2_t *) &__reint_261, 
__p3_261); \ - __ret_261 = vdotq_u32(__s0_261, __s1_261, *(uint8x16_t *) &__reint1_261); \ - __ret_261; \ -}) -#else -#define vdotq_lane_u32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \ - uint32x4_t __ret_262; \ - uint32x4_t __s0_262 = __p0_262; \ - uint8x16_t __s1_262 = __p1_262; \ - uint8x8_t __s2_262 = __p2_262; \ - uint32x4_t __rev0_262; __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 3, 2, 1, 0); \ - uint8x16_t __rev1_262; __rev1_262 = __builtin_shufflevector(__s1_262, __s1_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_262; __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_262 = __rev2_262; \ -uint32x4_t __reint1_262 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_262, __p3_262); \ - __ret_262 = __noswap_vdotq_u32(__rev0_262, __rev1_262, *(uint8x16_t *) &__reint1_262); \ - __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 3, 2, 1, 0); \ - __ret_262; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdotq_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \ - int32x4_t __ret_263; \ - int32x4_t __s0_263 = __p0_263; \ - int8x16_t __s1_263 = __p1_263; \ - int8x8_t __s2_263 = __p2_263; \ -int8x8_t __reint_263 = __s2_263; \ -int32x4_t __reint1_263 = splatq_lane_s32(*(int32x2_t *) &__reint_263, __p3_263); \ - __ret_263 = vdotq_s32(__s0_263, __s1_263, *(int8x16_t *) &__reint1_263); \ - __ret_263; \ -}) -#else -#define vdotq_lane_s32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \ - int32x4_t __ret_264; \ - int32x4_t __s0_264 = __p0_264; \ - int8x16_t __s1_264 = __p1_264; \ - int8x8_t __s2_264 = __p2_264; \ - int32x4_t __rev0_264; __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \ - int8x16_t __rev1_264; __rev1_264 = __builtin_shufflevector(__s1_264, __s1_264, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_264; __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_264 = __rev2_264; \ -int32x4_t __reint1_264 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_264, __p3_264); \ - __ret_264 = __noswap_vdotq_s32(__rev0_264, __rev1_264, *(int8x16_t *) &__reint1_264); \ - __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \ - __ret_264; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdot_lane_u32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \ - uint32x2_t __ret_265; \ - uint32x2_t __s0_265 = __p0_265; \ - uint8x8_t __s1_265 = __p1_265; \ - uint8x8_t __s2_265 = __p2_265; \ -uint8x8_t __reint_265 = __s2_265; \ -uint32x2_t __reint1_265 = splat_lane_u32(*(uint32x2_t *) &__reint_265, __p3_265); \ - __ret_265 = vdot_u32(__s0_265, __s1_265, *(uint8x8_t *) &__reint1_265); \ - __ret_265; \ -}) -#else -#define vdot_lane_u32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \ - uint32x2_t __ret_266; \ - uint32x2_t __s0_266 = __p0_266; \ - uint8x8_t __s1_266 = __p1_266; \ - uint8x8_t __s2_266 = __p2_266; \ - uint32x2_t __rev0_266; __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \ - uint8x8_t __rev1_266; __rev1_266 = __builtin_shufflevector(__s1_266, __s1_266, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_266; __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_266 = __rev2_266; \ -uint32x2_t __reint1_266 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_266, __p3_266); \ - __ret_266 = __noswap_vdot_u32(__rev0_266, __rev1_266, *(uint8x8_t *) &__reint1_266); \ - 
__ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \ - __ret_266; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdot_lane_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \ - int32x2_t __ret_267; \ - int32x2_t __s0_267 = __p0_267; \ - int8x8_t __s1_267 = __p1_267; \ - int8x8_t __s2_267 = __p2_267; \ -int8x8_t __reint_267 = __s2_267; \ -int32x2_t __reint1_267 = splat_lane_s32(*(int32x2_t *) &__reint_267, __p3_267); \ - __ret_267 = vdot_s32(__s0_267, __s1_267, *(int8x8_t *) &__reint1_267); \ - __ret_267; \ -}) -#else -#define vdot_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \ - int32x2_t __ret_268; \ - int32x2_t __s0_268 = __p0_268; \ - int8x8_t __s1_268 = __p1_268; \ - int8x8_t __s2_268 = __p2_268; \ - int32x2_t __rev0_268; __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \ - int8x8_t __rev1_268; __rev1_268 = __builtin_shufflevector(__s1_268, __s1_268, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_268; __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_268 = __rev2_268; \ -int32x2_t __reint1_268 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_268, __p3_268); \ - __ret_268 = __noswap_vdot_s32(__rev0_268, __rev1_268, *(int8x8_t *) &__reint1_268); \ - __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \ - __ret_268; \ -}) -#endif - -#endif -#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -#define vdotq_laneq_u32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \ - uint32x4_t __ret_269; \ - uint32x4_t __s0_269 = __p0_269; \ - uint8x16_t __s1_269 = __p1_269; \ - uint8x16_t __s2_269 = __p2_269; \ -uint8x16_t __reint_269 = __s2_269; \ -uint32x4_t __reint1_269 = splatq_laneq_u32(*(uint32x4_t *) &__reint_269, __p3_269); \ - __ret_269 = vdotq_u32(__s0_269, __s1_269, *(uint8x16_t *) &__reint1_269); \ - __ret_269; \ -}) -#else -#define vdotq_laneq_u32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ - uint32x4_t __ret_270; \ - uint32x4_t __s0_270 = __p0_270; \ - uint8x16_t __s1_270 = __p1_270; \ - uint8x16_t __s2_270 = __p2_270; \ - uint32x4_t __rev0_270; __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 3, 2, 1, 0); \ - uint8x16_t __rev1_270; __rev1_270 = __builtin_shufflevector(__s1_270, __s1_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_270; __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_270 = __rev2_270; \ -uint32x4_t __reint1_270 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_270, __p3_270); \ - __ret_270 = __noswap_vdotq_u32(__rev0_270, __rev1_270, *(uint8x16_t *) &__reint1_270); \ - __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 3, 2, 1, 0); \ - __ret_270; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ - int32x4_t __ret_271; \ - int32x4_t __s0_271 = __p0_271; \ - int8x16_t __s1_271 = __p1_271; \ - int8x16_t __s2_271 = __p2_271; \ -int8x16_t __reint_271 = __s2_271; \ -int32x4_t __reint1_271 = splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271); \ - __ret_271 = vdotq_s32(__s0_271, __s1_271, *(int8x16_t *) &__reint1_271); \ - __ret_271; \ -}) -#else -#define vdotq_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ - int32x4_t __ret_272; \ - int32x4_t __s0_272 = __p0_272; \ - int8x16_t __s1_272 = __p1_272; \ - int8x16_t __s2_272 = __p2_272; \ - int32x4_t __rev0_272; 
__rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \ - int8x16_t __rev1_272; __rev1_272 = __builtin_shufflevector(__s1_272, __s1_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_272; __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_272 = __rev2_272; \ -int32x4_t __reint1_272 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272); \ - __ret_272 = __noswap_vdotq_s32(__rev0_272, __rev1_272, *(int8x16_t *) &__reint1_272); \ - __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \ - __ret_272; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdot_laneq_u32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ - uint32x2_t __ret_273; \ - uint32x2_t __s0_273 = __p0_273; \ - uint8x8_t __s1_273 = __p1_273; \ - uint8x16_t __s2_273 = __p2_273; \ -uint8x16_t __reint_273 = __s2_273; \ -uint32x2_t __reint1_273 = splat_laneq_u32(*(uint32x4_t *) &__reint_273, __p3_273); \ - __ret_273 = vdot_u32(__s0_273, __s1_273, *(uint8x8_t *) &__reint1_273); \ - __ret_273; \ -}) -#else -#define vdot_laneq_u32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ - uint32x2_t __ret_274; \ - uint32x2_t __s0_274 = __p0_274; \ - uint8x8_t __s1_274 = __p1_274; \ - uint8x16_t __s2_274 = __p2_274; \ - uint32x2_t __rev0_274; __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 1, 0); \ - uint8x8_t __rev1_274; __rev1_274 = __builtin_shufflevector(__s1_274, __s1_274, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_274; __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_274 = __rev2_274; \ -uint32x2_t __reint1_274 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_274, __p3_274); \ - __ret_274 = __noswap_vdot_u32(__rev0_274, __rev1_274, *(uint8x8_t *) &__reint1_274); \ - __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 1, 0); \ - __ret_274; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdot_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ - int32x2_t __ret_275; \ - int32x2_t __s0_275 = __p0_275; \ - int8x8_t __s1_275 = __p1_275; \ - int8x16_t __s2_275 = __p2_275; \ -int8x16_t __reint_275 = __s2_275; \ -int32x2_t __reint1_275 = splat_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275); \ - __ret_275 = vdot_s32(__s0_275, __s1_275, *(int8x8_t *) &__reint1_275); \ - __ret_275; \ -}) -#else -#define vdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ - int32x2_t __ret_276; \ - int32x2_t __s0_276 = __p0_276; \ - int8x8_t __s1_276 = __p1_276; \ - int8x16_t __s2_276 = __p2_276; \ - int32x2_t __rev0_276; __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \ - int8x8_t __rev1_276; __rev1_276 = __builtin_shufflevector(__s1_276, __s1_276, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_276; __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_276 = __rev2_276; \ -int32x2_t __reint1_276 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276); \ - __ret_276 = __noswap_vdot_s32(__rev0_276, __rev1_276, *(int8x8_t *) &__reint1_276); \ - __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \ - __ret_276; \ -}) -#endif - #endif #if defined(__ARM_FEATURE_FMA) #ifdef __LITTLE_ENDIAN__ @@ -43576,4198 +41729,6 @@ __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) } #endif -#endif -#if 
defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); 
- return __ret; -} -#else -__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vabsq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vabsq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vabs_f16(float16x4_t __p0) { - 
float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vabs_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#else -__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#else -__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - 
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - 
return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vceqzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceqz_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vceqz_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t 
vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcgezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgez_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcgez_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgtz_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcgtz_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vclezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vclezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclez_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vclez_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { 
- uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcltzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcltz_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcltz_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - 
-#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) -#else -#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) -#else -#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai 
int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = 
(uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t 
vcvtpq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ - __ret; \ -}) -#else -#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ - __ret; \ -}) -#else -#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#else -__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) 
__builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#else -__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = vfmaq_f16(__p0, -__p1, __p2); - return __ret; -} -#else -__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = vfma_f16(__p0, -__p1, __p2); - return __ret; -} -#else -__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - 
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f16(__p0_277, __p1_277, __p2_277) __extension__ ({ \ - float16x8_t __ret_277; \ - float16x8_t __s0_277 = __p0_277; \ - float16x4_t __s1_277 = __p1_277; \ - __ret_277 = __s0_277 * splatq_lane_f16(__s1_277, __p2_277); \ - __ret_277; \ -}) -#else -#define vmulq_lane_f16(__p0_278, __p1_278, __p2_278) __extension__ ({ \ - float16x8_t __ret_278; \ - float16x8_t __s0_278 = __p0_278; \ - float16x4_t __s1_278 = __p1_278; \ - float16x8_t __rev0_278; __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev1_278; __rev1_278 = __builtin_shufflevector(__s1_278, __s1_278, 3, 2, 1, 0); \ - __ret_278 = __rev0_278 * __noswap_splatq_lane_f16(__rev1_278, __p2_278); \ - __ret_278 = __builtin_shufflevector(__ret_278, 
__ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_278; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_f16(__p0_279, __p1_279, __p2_279) __extension__ ({ \ - float16x4_t __ret_279; \ - float16x4_t __s0_279 = __p0_279; \ - float16x4_t __s1_279 = __p1_279; \ - __ret_279 = __s0_279 * splat_lane_f16(__s1_279, __p2_279); \ - __ret_279; \ -}) -#else -#define vmul_lane_f16(__p0_280, __p1_280, __p2_280) __extension__ ({ \ - float16x4_t __ret_280; \ - float16x4_t __s0_280 = __p0_280; \ - float16x4_t __s1_280 = __p1_280; \ - float16x4_t __rev0_280; __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 3, 2, 1, 0); \ - float16x4_t __rev1_280; __rev1_280 = __builtin_shufflevector(__s1_280, __s1_280, 3, 2, 1, 0); \ - __ret_280 = __rev0_280 * __noswap_splat_lane_f16(__rev1_280, __p2_280); \ - __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 3, 2, 1, 0); \ - __ret_280; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ - __ret; \ -}) -#else -#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ - __ret; \ -}) -#else -#define vmul_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vnegq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai float16x8_t vnegq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vneg_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai float16x4_t vneg_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrecpeq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrecpeq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrecpe_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrecpe_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrev64q_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai float16x8_t vrev64q_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrev64_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - return __ret; -} -#else -__ai float16x4_t vrev64_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrsqrte_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrsqrte_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) 
{ - float16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ - __ret; \ -}) -#else 
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ - __ret; \ -}) -#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ - __ret; \ -}) -#else -#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ - __ret; \ -}) -#else -#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ 
- float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ - __ret; \ -}) -#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ - __ret; \ -}) -#else -#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ - __ret; \ -}) -#else -#define vfma_laneq_f16(__p0, 
__p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret; \ -}) -#else -#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret; \ -}) -#else -#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsh_lane_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ - float16_t __ret_281; \ - float16_t __s0_281 = __p0_281; \ - float16_t __s1_281 = __p1_281; \ - float16x4_t __s2_281 = __p2_281; \ - __ret_281 = vfmah_lane_f16(__s0_281, -__s1_281, __s2_281, __p3_281); \ - __ret_281; \ -}) -#else -#define vfmsh_lane_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ - float16_t __ret_282; \ - float16_t __s0_282 = __p0_282; \ - float16_t __s1_282 = __p1_282; \ - float16x4_t __s2_282 = __p2_282; \ - float16x4_t __rev2_282; __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \ - __ret_282 = __noswap_vfmah_lane_f16(__s0_282, -__s1_282, __rev2_282, __p3_282); \ - __ret_282; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ - float16x8_t __ret_283; \ - float16x8_t __s0_283 = __p0_283; \ - float16x8_t 
__s1_283 = __p1_283; \ - float16x4_t __s2_283 = __p2_283; \ - __ret_283 = vfmaq_lane_f16(__s0_283, -__s1_283, __s2_283, __p3_283); \ - __ret_283; \ -}) -#else -#define vfmsq_lane_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ - float16x8_t __ret_284; \ - float16x8_t __s0_284 = __p0_284; \ - float16x8_t __s1_284 = __p1_284; \ - float16x4_t __s2_284 = __p2_284; \ - float16x8_t __rev0_284; __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_284; __rev1_284 = __builtin_shufflevector(__s1_284, __s1_284, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_284; __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 3, 2, 1, 0); \ - __ret_284 = __noswap_vfmaq_lane_f16(__rev0_284, -__rev1_284, __rev2_284, __p3_284); \ - __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_284; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_lane_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ - float16x4_t __ret_285; \ - float16x4_t __s0_285 = __p0_285; \ - float16x4_t __s1_285 = __p1_285; \ - float16x4_t __s2_285 = __p2_285; \ - __ret_285 = vfma_lane_f16(__s0_285, -__s1_285, __s2_285, __p3_285); \ - __ret_285; \ -}) -#else -#define vfms_lane_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ - float16x4_t __ret_286; \ - float16x4_t __s0_286 = __p0_286; \ - float16x4_t __s1_286 = __p1_286; \ - float16x4_t __s2_286 = __p2_286; \ - float16x4_t __rev0_286; __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 3, 2, 1, 0); \ - float16x4_t __rev1_286; __rev1_286 = __builtin_shufflevector(__s1_286, __s1_286, 3, 2, 1, 0); \ - float16x4_t __rev2_286; __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 3, 2, 1, 0); \ - __ret_286 = __noswap_vfma_lane_f16(__rev0_286, -__rev1_286, __rev2_286, __p3_286); \ - __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 3, 2, 1, 0); \ - __ret_286; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsh_laneq_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ - float16_t __ret_287; \ - float16_t __s0_287 = __p0_287; \ - float16_t __s1_287 = __p1_287; \ - float16x8_t __s2_287 = __p2_287; \ - __ret_287 = vfmah_laneq_f16(__s0_287, -__s1_287, __s2_287, __p3_287); \ - __ret_287; \ -}) -#else -#define vfmsh_laneq_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ - float16_t __ret_288; \ - float16_t __s0_288 = __p0_288; \ - float16_t __s1_288 = __p1_288; \ - float16x8_t __s2_288 = __p2_288; \ - float16x8_t __rev2_288; __rev2_288 = __builtin_shufflevector(__s2_288, __s2_288, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_288 = __noswap_vfmah_laneq_f16(__s0_288, -__s1_288, __rev2_288, __p3_288); \ - __ret_288; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ - float16x8_t __ret_289; \ - float16x8_t __s0_289 = __p0_289; \ - float16x8_t __s1_289 = __p1_289; \ - float16x8_t __s2_289 = __p2_289; \ - __ret_289 = vfmaq_laneq_f16(__s0_289, -__s1_289, __s2_289, __p3_289); \ - __ret_289; \ -}) -#else -#define vfmsq_laneq_f16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ - float16x8_t __ret_290; \ - float16x8_t __s0_290 = __p0_290; \ - float16x8_t __s1_290 = __p1_290; \ - float16x8_t __s2_290 = __p2_290; \ - float16x8_t __rev0_290; __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_290; __rev1_290 = __builtin_shufflevector(__s1_290, __s1_290, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_290; 
__rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_290 = __noswap_vfmaq_laneq_f16(__rev0_290, -__rev1_290, __rev2_290, __p3_290); \ - __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_290; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f16(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ - float16x4_t __ret_291; \ - float16x4_t __s0_291 = __p0_291; \ - float16x4_t __s1_291 = __p1_291; \ - float16x8_t __s2_291 = __p2_291; \ - __ret_291 = vfma_laneq_f16(__s0_291, -__s1_291, __s2_291, __p3_291); \ - __ret_291; \ -}) -#else -#define vfms_laneq_f16(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ - float16x4_t __ret_292; \ - float16x4_t __s0_292 = __p0_292; \ - float16x4_t __s1_292 = __p1_292; \ - float16x8_t __s2_292 = __p2_292; \ - float16x4_t __rev0_292; __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 3, 2, 1, 0); \ - float16x4_t __rev1_292; __rev1_292 = __builtin_shufflevector(__s1_292, __s1_292, 3, 2, 1, 0); \ - float16x8_t __rev2_292; __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_292 = __noswap_vfma_laneq_f16(__rev0_292, -__rev1_292, __rev2_292, __p3_292); \ - __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 3, 2, 1, 0); \ - __ret_292; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret; \ -}) -#else -#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret; \ -}) -#else -#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmaxnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ - __ret; \ -}) -#else -#define vmaxnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vmaxnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ - __ret; \ -}) -#else -#define vmaxnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmaxvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ - __ret; \ -}) -#else -#define vmaxvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmaxv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ - __ret; \ -}) -#else -#define vmaxv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vminnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ - __ret; \ -}) -#else -#define vminnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vminnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ - __ret; \ -}) -#else -#define vminnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vminvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ - __ret; \ -}) -#else -#define vminvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vminv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ - __ret; \ -}) -#else -#define vminv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \ - float16x8_t __ret_293; \ - float16x8_t 
__s0_293 = __p0_293; \ - float16x8_t __s1_293 = __p1_293; \ - __ret_293 = __s0_293 * splatq_laneq_f16(__s1_293, __p2_293); \ - __ret_293; \ -}) -#else -#define vmulq_laneq_f16(__p0_294, __p1_294, __p2_294) __extension__ ({ \ - float16x8_t __ret_294; \ - float16x8_t __s0_294 = __p0_294; \ - float16x8_t __s1_294 = __p1_294; \ - float16x8_t __rev0_294; __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_294; __rev1_294 = __builtin_shufflevector(__s1_294, __s1_294, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_294 = __rev0_294 * __noswap_splatq_laneq_f16(__rev1_294, __p2_294); \ - __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_294; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f16(__p0_295, __p1_295, __p2_295) __extension__ ({ \ - float16x4_t __ret_295; \ - float16x4_t __s0_295 = __p0_295; \ - float16x8_t __s1_295 = __p1_295; \ - __ret_295 = __s0_295 * splat_laneq_f16(__s1_295, __p2_295); \ - __ret_295; \ -}) -#else -#define vmul_laneq_f16(__p0_296, __p1_296, __p2_296) __extension__ ({ \ - float16x4_t __ret_296; \ - float16x4_t __s0_296 = __p0_296; \ - float16x8_t __s1_296 = __p1_296; \ - float16x4_t __rev0_296; __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 3, 2, 1, 0); \ - float16x8_t __rev1_296; __rev1_296 = __builtin_shufflevector(__s1_296, __s1_296, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_296 = __rev0_296 * __noswap_splat_laneq_f16(__rev1_296, __p2_296); \ - __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 3, 2, 1, 0); \ - __ret_296; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - __ret = (float16_t) 
__builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f16(__p0_297, __p1_297, __p2_297) __extension__ ({ \ - float16x8_t __ret_297; \ - float16x8_t __s0_297 = __p0_297; \ - float16x4_t __s1_297 = __p1_297; \ - __ret_297 = vmulxq_f16(__s0_297, splatq_lane_f16(__s1_297, __p2_297)); \ - __ret_297; \ -}) -#else -#define vmulxq_lane_f16(__p0_298, __p1_298, __p2_298) __extension__ ({ \ - float16x8_t __ret_298; \ - float16x8_t __s0_298 = __p0_298; \ - float16x4_t __s1_298 = __p1_298; \ - float16x8_t __rev0_298; __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev1_298; __rev1_298 = __builtin_shufflevector(__s1_298, __s1_298, 3, 2, 1, 0); \ - __ret_298 = __noswap_vmulxq_f16(__rev0_298, __noswap_splatq_lane_f16(__rev1_298, __p2_298)); \ - __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_298; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_lane_f16(__p0_299, __p1_299, __p2_299) __extension__ ({ \ - float16x4_t __ret_299; \ - float16x4_t __s0_299 = __p0_299; \ - float16x4_t __s1_299 = __p1_299; \ - __ret_299 = vmulx_f16(__s0_299, splat_lane_f16(__s1_299, __p2_299)); \ - __ret_299; \ -}) -#else -#define vmulx_lane_f16(__p0_300, __p1_300, __p2_300) __extension__ ({ \ - float16x4_t __ret_300; \ - float16x4_t __s0_300 = __p0_300; \ - float16x4_t __s1_300 = __p1_300; \ - float16x4_t __rev0_300; __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 3, 2, 1, 0); \ - float16x4_t __rev1_300; __rev1_300 = __builtin_shufflevector(__s1_300, __s1_300, 3, 2, 1, 0); \ - __ret_300 = __noswap_vmulx_f16(__rev0_300, __noswap_splat_lane_f16(__rev1_300, __p2_300)); \ - __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 3, 2, 1, 0); \ - __ret_300; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f16(__p0_301, __p1_301, __p2_301) __extension__ ({ \ - float16x8_t __ret_301; \ - float16x8_t __s0_301 = __p0_301; \ - float16x8_t __s1_301 = __p1_301; \ - __ret_301 = vmulxq_f16(__s0_301, splatq_laneq_f16(__s1_301, __p2_301)); \ - __ret_301; \ -}) -#else -#define vmulxq_laneq_f16(__p0_302, __p1_302, __p2_302) __extension__ ({ \ - float16x8_t __ret_302; \ - float16x8_t __s0_302 = __p0_302; \ - float16x8_t __s1_302 = __p1_302; \ - float16x8_t __rev0_302; __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_302; __rev1_302 = __builtin_shufflevector(__s1_302, __s1_302, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_302 = 
__noswap_vmulxq_f16(__rev0_302, __noswap_splatq_laneq_f16(__rev1_302, __p2_302)); \ - __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_302; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f16(__p0_303, __p1_303, __p2_303) __extension__ ({ \ - float16x4_t __ret_303; \ - float16x4_t __s0_303 = __p0_303; \ - float16x8_t __s1_303 = __p1_303; \ - __ret_303 = vmulx_f16(__s0_303, splat_laneq_f16(__s1_303, __p2_303)); \ - __ret_303; \ -}) -#else -#define vmulx_laneq_f16(__p0_304, __p1_304, __p2_304) __extension__ ({ \ - float16x4_t __ret_304; \ - float16x4_t __s0_304 = __p0_304; \ - float16x8_t __s1_304 = __p1_304; \ - float16x4_t __rev0_304; __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 3, 2, 1, 0); \ - float16x8_t __rev1_304; __rev1_304 = __builtin_shufflevector(__s1_304, __s1_304, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_304 = __noswap_vmulx_f16(__rev0_304, __noswap_splat_laneq_f16(__rev1_304, __p2_304)); \ - __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 3, 2, 1, 0); \ - __ret_304; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ - __ret; \ -}) -#else -#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ - __ret; \ -}) -#else -#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, 
(int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndiq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrndiq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndi_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrndi_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vsqrtq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vsqrtq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vsqrt_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vsqrt_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} -#else -__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} -#else -__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = 
__builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} -#else -__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} -#else -__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} -#else -__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); - return __ret; -} -#else -__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = 
__builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); - return __ret; -} -#else -__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); - return __ret; -} -#else -__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); - return __ret; -} -#else -__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); - return __ret; -} -#else -__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_MATMUL_INT8) -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 
3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#else -__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vusdotq_lane_s32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \ - int32x4_t __ret_305; \ - int32x4_t __s0_305 = __p0_305; \ - uint8x16_t __s1_305 = __p1_305; \ - int8x8_t __s2_305 = __p2_305; \ -int8x8_t __reint_305 = __s2_305; \ - __ret_305 = vusdotq_s32(__s0_305, __s1_305, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_305, __p3_305))); \ - __ret_305; \ -}) -#else -#define vusdotq_lane_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ - int32x4_t __ret_306; \ - int32x4_t __s0_306 
= __p0_306; \ - uint8x16_t __s1_306 = __p1_306; \ - int8x8_t __s2_306 = __p2_306; \ - int32x4_t __rev0_306; __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \ - uint8x16_t __rev1_306; __rev1_306 = __builtin_shufflevector(__s1_306, __s1_306, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_306; __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_306 = __rev2_306; \ - __ret_306 = __noswap_vusdotq_s32(__rev0_306, __rev1_306, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_306, __p3_306))); \ - __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \ - __ret_306; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vusdot_lane_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ - int32x2_t __ret_307; \ - int32x2_t __s0_307 = __p0_307; \ - uint8x8_t __s1_307 = __p1_307; \ - int8x8_t __s2_307 = __p2_307; \ -int8x8_t __reint_307 = __s2_307; \ - __ret_307 = vusdot_s32(__s0_307, __s1_307, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_307, __p3_307))); \ - __ret_307; \ -}) -#else -#define vusdot_lane_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ - int32x2_t __ret_308; \ - int32x2_t __s0_308 = __p0_308; \ - uint8x8_t __s1_308 = __p1_308; \ - int8x8_t __s2_308 = __p2_308; \ - int32x2_t __rev0_308; __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \ - uint8x8_t __rev1_308; __rev1_308 = __builtin_shufflevector(__s1_308, __s1_308, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_308; __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_308 = __rev2_308; \ - __ret_308 = __noswap_vusdot_s32(__rev0_308, __rev1_308, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_308, __p3_308))); \ - __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \ - __ret_308; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_QRDMX) -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); - return __ret; -} -#else -__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmlahq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#else -__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); - return __ret; -} -#else -__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmlah_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_lane_s32(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ - int32x4_t __ret_309; \ - int32x4_t __s0_309 = __p0_309; \ - int32x4_t 
__s1_309 = __p1_309; \ - int32x2_t __s2_309 = __p2_309; \ - __ret_309 = vqrdmlahq_s32(__s0_309, __s1_309, splatq_lane_s32(__s2_309, __p3_309)); \ - __ret_309; \ -}) -#else -#define vqrdmlahq_lane_s32(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ - int32x4_t __ret_310; \ - int32x4_t __s0_310 = __p0_310; \ - int32x4_t __s1_310 = __p1_310; \ - int32x2_t __s2_310 = __p2_310; \ - int32x4_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \ - int32x4_t __rev1_310; __rev1_310 = __builtin_shufflevector(__s1_310, __s1_310, 3, 2, 1, 0); \ - int32x2_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \ - __ret_310 = __noswap_vqrdmlahq_s32(__rev0_310, __rev1_310, __noswap_splatq_lane_s32(__rev2_310, __p3_310)); \ - __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \ - __ret_310; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_lane_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ - int16x8_t __ret_311; \ - int16x8_t __s0_311 = __p0_311; \ - int16x8_t __s1_311 = __p1_311; \ - int16x4_t __s2_311 = __p2_311; \ - __ret_311 = vqrdmlahq_s16(__s0_311, __s1_311, splatq_lane_s16(__s2_311, __p3_311)); \ - __ret_311; \ -}) -#else -#define vqrdmlahq_lane_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ - int16x8_t __ret_312; \ - int16x8_t __s0_312 = __p0_312; \ - int16x8_t __s1_312 = __p1_312; \ - int16x4_t __s2_312 = __p2_312; \ - int16x8_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_312; __rev1_312 = __builtin_shufflevector(__s1_312, __s1_312, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 3, 2, 1, 0); \ - __ret_312 = __noswap_vqrdmlahq_s16(__rev0_312, __rev1_312, __noswap_splatq_lane_s16(__rev2_312, __p3_312)); \ - __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_312; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_lane_s32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ - int32x2_t __ret_313; \ - int32x2_t __s0_313 = __p0_313; \ - int32x2_t __s1_313 = __p1_313; \ - int32x2_t __s2_313 = __p2_313; \ - __ret_313 = vqrdmlah_s32(__s0_313, __s1_313, splat_lane_s32(__s2_313, __p3_313)); \ - __ret_313; \ -}) -#else -#define vqrdmlah_lane_s32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ - int32x2_t __ret_314; \ - int32x2_t __s0_314 = __p0_314; \ - int32x2_t __s1_314 = __p1_314; \ - int32x2_t __s2_314 = __p2_314; \ - int32x2_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \ - int32x2_t __rev1_314; __rev1_314 = __builtin_shufflevector(__s1_314, __s1_314, 1, 0); \ - int32x2_t __rev2_314; __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \ - __ret_314 = __noswap_vqrdmlah_s32(__rev0_314, __rev1_314, __noswap_splat_lane_s32(__rev2_314, __p3_314)); \ - __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \ - __ret_314; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_lane_s16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ - int16x4_t __ret_315; \ - int16x4_t __s0_315 = __p0_315; \ - int16x4_t __s1_315 = __p1_315; \ - int16x4_t __s2_315 = __p2_315; \ - __ret_315 = vqrdmlah_s16(__s0_315, __s1_315, splat_lane_s16(__s2_315, __p3_315)); \ - __ret_315; \ -}) -#else -#define vqrdmlah_lane_s16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ - int16x4_t __ret_316; \ - int16x4_t __s0_316 = __p0_316; \ - 
int16x4_t __s1_316 = __p1_316; \ - int16x4_t __s2_316 = __p2_316; \ - int16x4_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \ - int16x4_t __rev1_316; __rev1_316 = __builtin_shufflevector(__s1_316, __s1_316, 3, 2, 1, 0); \ - int16x4_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \ - __ret_316 = __noswap_vqrdmlah_s16(__rev0_316, __rev1_316, __noswap_splat_lane_s16(__rev2_316, __p3_316)); \ - __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \ - __ret_316; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); - return __ret; -} -#else -__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmlshq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#else -__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) 
__builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); - return __ret; -} -#else -__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmlsh_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_lane_s32(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ - int32x4_t __ret_317; \ - int32x4_t __s0_317 = __p0_317; \ - int32x4_t __s1_317 = __p1_317; \ - int32x2_t __s2_317 = __p2_317; \ - __ret_317 = vqrdmlshq_s32(__s0_317, __s1_317, splatq_lane_s32(__s2_317, __p3_317)); \ - __ret_317; \ -}) -#else -#define vqrdmlshq_lane_s32(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ - int32x4_t __ret_318; \ - int32x4_t __s0_318 = __p0_318; \ - int32x4_t __s1_318 = __p1_318; \ - int32x2_t __s2_318 = __p2_318; \ - int32x4_t __rev0_318; __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 3, 2, 1, 0); \ - int32x4_t __rev1_318; __rev1_318 = __builtin_shufflevector(__s1_318, __s1_318, 3, 2, 1, 0); \ - int32x2_t __rev2_318; __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \ - __ret_318 = __noswap_vqrdmlshq_s32(__rev0_318, __rev1_318, __noswap_splatq_lane_s32(__rev2_318, __p3_318)); \ - __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 3, 2, 1, 0); \ - __ret_318; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ - int16x8_t __ret_319; \ - int16x8_t __s0_319 = __p0_319; \ - int16x8_t __s1_319 = __p1_319; \ - int16x4_t __s2_319 = __p2_319; \ - __ret_319 = vqrdmlshq_s16(__s0_319, __s1_319, splatq_lane_s16(__s2_319, __p3_319)); \ - __ret_319; \ -}) -#else -#define vqrdmlshq_lane_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ - int16x8_t __ret_320; \ - int16x8_t __s0_320 = __p0_320; \ - int16x8_t __s1_320 = __p1_320; \ - int16x4_t __s2_320 = __p2_320; \ - int16x8_t __rev0_320; __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_320; __rev1_320 = __builtin_shufflevector(__s1_320, __s1_320, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_320; __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \ - __ret_320 = __noswap_vqrdmlshq_s16(__rev0_320, __rev1_320, __noswap_splatq_lane_s16(__rev2_320, __p3_320)); \ - __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_320; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_lane_s32(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ - int32x2_t __ret_321; \ - int32x2_t __s0_321 = __p0_321; \ - int32x2_t __s1_321 = __p1_321; \ - int32x2_t __s2_321 = __p2_321; \ - __ret_321 = 
vqrdmlsh_s32(__s0_321, __s1_321, splat_lane_s32(__s2_321, __p3_321)); \ - __ret_321; \ -}) -#else -#define vqrdmlsh_lane_s32(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ - int32x2_t __ret_322; \ - int32x2_t __s0_322 = __p0_322; \ - int32x2_t __s1_322 = __p1_322; \ - int32x2_t __s2_322 = __p2_322; \ - int32x2_t __rev0_322; __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 1, 0); \ - int32x2_t __rev1_322; __rev1_322 = __builtin_shufflevector(__s1_322, __s1_322, 1, 0); \ - int32x2_t __rev2_322; __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \ - __ret_322 = __noswap_vqrdmlsh_s32(__rev0_322, __rev1_322, __noswap_splat_lane_s32(__rev2_322, __p3_322)); \ - __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 1, 0); \ - __ret_322; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_lane_s16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ - int16x4_t __ret_323; \ - int16x4_t __s0_323 = __p0_323; \ - int16x4_t __s1_323 = __p1_323; \ - int16x4_t __s2_323 = __p2_323; \ - __ret_323 = vqrdmlsh_s16(__s0_323, __s1_323, splat_lane_s16(__s2_323, __p3_323)); \ - __ret_323; \ -}) -#else -#define vqrdmlsh_lane_s16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ - int16x4_t __ret_324; \ - int16x4_t __s0_324 = __p0_324; \ - int16x4_t __s1_324 = __p1_324; \ - int16x4_t __s2_324 = __p2_324; \ - int16x4_t __rev0_324; __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \ - int16x4_t __rev1_324; __rev1_324 = __builtin_shufflevector(__s1_324, __s1_324, 3, 2, 1, 0); \ - int16x4_t __rev2_324; __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 3, 2, 1, 0); \ - __ret_324 = __noswap_vqrdmlsh_s16(__rev0_324, __rev1_324, __noswap_splat_lane_s16(__rev2_324, __p3_324)); \ - __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \ - __ret_324; \ -}) -#endif - -#endif -#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__) -__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2); - return __ret; -} -__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahs_lane_s32(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ - int32_t __ret_325; \ - int32_t __s0_325 = __p0_325; \ - int32_t __s1_325 = __p1_325; \ - int32x2_t __s2_325 = __p2_325; \ - __ret_325 = vqrdmlahs_s32(__s0_325, __s1_325, vget_lane_s32(__s2_325, __p3_325)); \ - __ret_325; \ -}) -#else -#define vqrdmlahs_lane_s32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ - int32_t __ret_326; \ - int32_t __s0_326 = __p0_326; \ - int32_t __s1_326 = __p1_326; \ - int32x2_t __s2_326 = __p2_326; \ - int32x2_t __rev2_326; __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 1, 0); \ - __ret_326 = vqrdmlahs_s32(__s0_326, __s1_326, __noswap_vget_lane_s32(__rev2_326, __p3_326)); \ - __ret_326; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahh_lane_s16(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ - int16_t __ret_327; \ - int16_t __s0_327 = __p0_327; \ - int16_t __s1_327 = __p1_327; \ - int16x4_t __s2_327 = __p2_327; \ - __ret_327 = vqrdmlahh_s16(__s0_327, __s1_327, vget_lane_s16(__s2_327, __p3_327)); \ - __ret_327; \ -}) -#else -#define vqrdmlahh_lane_s16(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ - int16_t __ret_328; \ - int16_t __s0_328 = __p0_328; \ - 
int16_t __s1_328 = __p1_328; \ - int16x4_t __s2_328 = __p2_328; \ - int16x4_t __rev2_328; __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \ - __ret_328 = vqrdmlahh_s16(__s0_328, __s1_328, __noswap_vget_lane_s16(__rev2_328, __p3_328)); \ - __ret_328; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahs_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ - int32_t __ret_329; \ - int32_t __s0_329 = __p0_329; \ - int32_t __s1_329 = __p1_329; \ - int32x4_t __s2_329 = __p2_329; \ - __ret_329 = vqrdmlahs_s32(__s0_329, __s1_329, vgetq_lane_s32(__s2_329, __p3_329)); \ - __ret_329; \ -}) -#else -#define vqrdmlahs_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \ - int32_t __ret_330; \ - int32_t __s0_330 = __p0_330; \ - int32_t __s1_330 = __p1_330; \ - int32x4_t __s2_330 = __p2_330; \ - int32x4_t __rev2_330; __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \ - __ret_330 = vqrdmlahs_s32(__s0_330, __s1_330, __noswap_vgetq_lane_s32(__rev2_330, __p3_330)); \ - __ret_330; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahh_laneq_s16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \ - int16_t __ret_331; \ - int16_t __s0_331 = __p0_331; \ - int16_t __s1_331 = __p1_331; \ - int16x8_t __s2_331 = __p2_331; \ - __ret_331 = vqrdmlahh_s16(__s0_331, __s1_331, vgetq_lane_s16(__s2_331, __p3_331)); \ - __ret_331; \ -}) -#else -#define vqrdmlahh_laneq_s16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ - int16_t __ret_332; \ - int16_t __s0_332 = __p0_332; \ - int16_t __s1_332 = __p1_332; \ - int16x8_t __s2_332 = __p2_332; \ - int16x8_t __rev2_332; __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_332 = vqrdmlahh_s16(__s0_332, __s1_332, __noswap_vgetq_lane_s16(__rev2_332, __p3_332)); \ - __ret_332; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_laneq_s32(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ - int32x4_t __ret_333; \ - int32x4_t __s0_333 = __p0_333; \ - int32x4_t __s1_333 = __p1_333; \ - int32x4_t __s2_333 = __p2_333; \ - __ret_333 = vqrdmlahq_s32(__s0_333, __s1_333, splatq_laneq_s32(__s2_333, __p3_333)); \ - __ret_333; \ -}) -#else -#define vqrdmlahq_laneq_s32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ - int32x4_t __ret_334; \ - int32x4_t __s0_334 = __p0_334; \ - int32x4_t __s1_334 = __p1_334; \ - int32x4_t __s2_334 = __p2_334; \ - int32x4_t __rev0_334; __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \ - int32x4_t __rev1_334; __rev1_334 = __builtin_shufflevector(__s1_334, __s1_334, 3, 2, 1, 0); \ - int32x4_t __rev2_334; __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 3, 2, 1, 0); \ - __ret_334 = __noswap_vqrdmlahq_s32(__rev0_334, __rev1_334, __noswap_splatq_laneq_s32(__rev2_334, __p3_334)); \ - __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \ - __ret_334; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_laneq_s16(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ - int16x8_t __ret_335; \ - int16x8_t __s0_335 = __p0_335; \ - int16x8_t __s1_335 = __p1_335; \ - int16x8_t __s2_335 = __p2_335; \ - __ret_335 = vqrdmlahq_s16(__s0_335, __s1_335, splatq_laneq_s16(__s2_335, __p3_335)); \ - __ret_335; \ -}) -#else -#define vqrdmlahq_laneq_s16(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ - int16x8_t __ret_336; \ - int16x8_t __s0_336 = __p0_336; \ - int16x8_t __s1_336 = __p1_336; \ - int16x8_t __s2_336 = __p2_336; \ - int16x8_t __rev0_336; 
__rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_336; __rev1_336 = __builtin_shufflevector(__s1_336, __s1_336, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_336; __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_336 = __noswap_vqrdmlahq_s16(__rev0_336, __rev1_336, __noswap_splatq_laneq_s16(__rev2_336, __p3_336)); \ - __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_336; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ - int32x2_t __ret_337; \ - int32x2_t __s0_337 = __p0_337; \ - int32x2_t __s1_337 = __p1_337; \ - int32x4_t __s2_337 = __p2_337; \ - __ret_337 = vqrdmlah_s32(__s0_337, __s1_337, splat_laneq_s32(__s2_337, __p3_337)); \ - __ret_337; \ -}) -#else -#define vqrdmlah_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ - int32x2_t __ret_338; \ - int32x2_t __s0_338 = __p0_338; \ - int32x2_t __s1_338 = __p1_338; \ - int32x4_t __s2_338 = __p2_338; \ - int32x2_t __rev0_338; __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \ - int32x2_t __rev1_338; __rev1_338 = __builtin_shufflevector(__s1_338, __s1_338, 1, 0); \ - int32x4_t __rev2_338; __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \ - __ret_338 = __noswap_vqrdmlah_s32(__rev0_338, __rev1_338, __noswap_splat_laneq_s32(__rev2_338, __p3_338)); \ - __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \ - __ret_338; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_laneq_s16(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \ - int16x4_t __ret_339; \ - int16x4_t __s0_339 = __p0_339; \ - int16x4_t __s1_339 = __p1_339; \ - int16x8_t __s2_339 = __p2_339; \ - __ret_339 = vqrdmlah_s16(__s0_339, __s1_339, splat_laneq_s16(__s2_339, __p3_339)); \ - __ret_339; \ -}) -#else -#define vqrdmlah_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ - int16x4_t __ret_340; \ - int16x4_t __s0_340 = __p0_340; \ - int16x4_t __s1_340 = __p1_340; \ - int16x8_t __s2_340 = __p2_340; \ - int16x4_t __rev0_340; __rev0_340 = __builtin_shufflevector(__s0_340, __s0_340, 3, 2, 1, 0); \ - int16x4_t __rev1_340; __rev1_340 = __builtin_shufflevector(__s1_340, __s1_340, 3, 2, 1, 0); \ - int16x8_t __rev2_340; __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_340 = __noswap_vqrdmlah_s16(__rev0_340, __rev1_340, __noswap_splat_laneq_s16(__rev2_340, __p3_340)); \ - __ret_340 = __builtin_shufflevector(__ret_340, __ret_340, 3, 2, 1, 0); \ - __ret_340; \ -}) -#endif - -__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2); - return __ret; -} -__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshs_lane_s32(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \ - int32_t __ret_341; \ - int32_t __s0_341 = __p0_341; \ - int32_t __s1_341 = __p1_341; \ - int32x2_t __s2_341 = __p2_341; \ - __ret_341 = vqrdmlshs_s32(__s0_341, __s1_341, vget_lane_s32(__s2_341, __p3_341)); \ - __ret_341; \ -}) -#else -#define vqrdmlshs_lane_s32(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \ - int32_t __ret_342; \ - int32_t __s0_342 = __p0_342; \ - int32_t __s1_342 = __p1_342; \ - 
int32x2_t __s2_342 = __p2_342; \ - int32x2_t __rev2_342; __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 1, 0); \ - __ret_342 = vqrdmlshs_s32(__s0_342, __s1_342, __noswap_vget_lane_s32(__rev2_342, __p3_342)); \ - __ret_342; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshh_lane_s16(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \ - int16_t __ret_343; \ - int16_t __s0_343 = __p0_343; \ - int16_t __s1_343 = __p1_343; \ - int16x4_t __s2_343 = __p2_343; \ - __ret_343 = vqrdmlshh_s16(__s0_343, __s1_343, vget_lane_s16(__s2_343, __p3_343)); \ - __ret_343; \ -}) -#else -#define vqrdmlshh_lane_s16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \ - int16_t __ret_344; \ - int16_t __s0_344 = __p0_344; \ - int16_t __s1_344 = __p1_344; \ - int16x4_t __s2_344 = __p2_344; \ - int16x4_t __rev2_344; __rev2_344 = __builtin_shufflevector(__s2_344, __s2_344, 3, 2, 1, 0); \ - __ret_344 = vqrdmlshh_s16(__s0_344, __s1_344, __noswap_vget_lane_s16(__rev2_344, __p3_344)); \ - __ret_344; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshs_laneq_s32(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \ - int32_t __ret_345; \ - int32_t __s0_345 = __p0_345; \ - int32_t __s1_345 = __p1_345; \ - int32x4_t __s2_345 = __p2_345; \ - __ret_345 = vqrdmlshs_s32(__s0_345, __s1_345, vgetq_lane_s32(__s2_345, __p3_345)); \ - __ret_345; \ -}) -#else -#define vqrdmlshs_laneq_s32(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \ - int32_t __ret_346; \ - int32_t __s0_346 = __p0_346; \ - int32_t __s1_346 = __p1_346; \ - int32x4_t __s2_346 = __p2_346; \ - int32x4_t __rev2_346; __rev2_346 = __builtin_shufflevector(__s2_346, __s2_346, 3, 2, 1, 0); \ - __ret_346 = vqrdmlshs_s32(__s0_346, __s1_346, __noswap_vgetq_lane_s32(__rev2_346, __p3_346)); \ - __ret_346; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshh_laneq_s16(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \ - int16_t __ret_347; \ - int16_t __s0_347 = __p0_347; \ - int16_t __s1_347 = __p1_347; \ - int16x8_t __s2_347 = __p2_347; \ - __ret_347 = vqrdmlshh_s16(__s0_347, __s1_347, vgetq_lane_s16(__s2_347, __p3_347)); \ - __ret_347; \ -}) -#else -#define vqrdmlshh_laneq_s16(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \ - int16_t __ret_348; \ - int16_t __s0_348 = __p0_348; \ - int16_t __s1_348 = __p1_348; \ - int16x8_t __s2_348 = __p2_348; \ - int16x8_t __rev2_348; __rev2_348 = __builtin_shufflevector(__s2_348, __s2_348, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_348 = vqrdmlshh_s16(__s0_348, __s1_348, __noswap_vgetq_lane_s16(__rev2_348, __p3_348)); \ - __ret_348; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_laneq_s32(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \ - int32x4_t __ret_349; \ - int32x4_t __s0_349 = __p0_349; \ - int32x4_t __s1_349 = __p1_349; \ - int32x4_t __s2_349 = __p2_349; \ - __ret_349 = vqrdmlshq_s32(__s0_349, __s1_349, splatq_laneq_s32(__s2_349, __p3_349)); \ - __ret_349; \ -}) -#else -#define vqrdmlshq_laneq_s32(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \ - int32x4_t __ret_350; \ - int32x4_t __s0_350 = __p0_350; \ - int32x4_t __s1_350 = __p1_350; \ - int32x4_t __s2_350 = __p2_350; \ - int32x4_t __rev0_350; __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 3, 2, 1, 0); \ - int32x4_t __rev1_350; __rev1_350 = __builtin_shufflevector(__s1_350, __s1_350, 3, 2, 1, 0); \ - int32x4_t __rev2_350; __rev2_350 = __builtin_shufflevector(__s2_350, __s2_350, 3, 2, 1, 0); \ - __ret_350 = __noswap_vqrdmlshq_s32(__rev0_350, __rev1_350, 
__noswap_splatq_laneq_s32(__rev2_350, __p3_350)); \ - __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 3, 2, 1, 0); \ - __ret_350; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_laneq_s16(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \ - int16x8_t __ret_351; \ - int16x8_t __s0_351 = __p0_351; \ - int16x8_t __s1_351 = __p1_351; \ - int16x8_t __s2_351 = __p2_351; \ - __ret_351 = vqrdmlshq_s16(__s0_351, __s1_351, splatq_laneq_s16(__s2_351, __p3_351)); \ - __ret_351; \ -}) -#else -#define vqrdmlshq_laneq_s16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \ - int16x8_t __ret_352; \ - int16x8_t __s0_352 = __p0_352; \ - int16x8_t __s1_352 = __p1_352; \ - int16x8_t __s2_352 = __p2_352; \ - int16x8_t __rev0_352; __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_352; __rev1_352 = __builtin_shufflevector(__s1_352, __s1_352, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_352; __rev2_352 = __builtin_shufflevector(__s2_352, __s2_352, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_352 = __noswap_vqrdmlshq_s16(__rev0_352, __rev1_352, __noswap_splatq_laneq_s16(__rev2_352, __p3_352)); \ - __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_352; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_laneq_s32(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \ - int32x2_t __ret_353; \ - int32x2_t __s0_353 = __p0_353; \ - int32x2_t __s1_353 = __p1_353; \ - int32x4_t __s2_353 = __p2_353; \ - __ret_353 = vqrdmlsh_s32(__s0_353, __s1_353, splat_laneq_s32(__s2_353, __p3_353)); \ - __ret_353; \ -}) -#else -#define vqrdmlsh_laneq_s32(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \ - int32x2_t __ret_354; \ - int32x2_t __s0_354 = __p0_354; \ - int32x2_t __s1_354 = __p1_354; \ - int32x4_t __s2_354 = __p2_354; \ - int32x2_t __rev0_354; __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 1, 0); \ - int32x2_t __rev1_354; __rev1_354 = __builtin_shufflevector(__s1_354, __s1_354, 1, 0); \ - int32x4_t __rev2_354; __rev2_354 = __builtin_shufflevector(__s2_354, __s2_354, 3, 2, 1, 0); \ - __ret_354 = __noswap_vqrdmlsh_s32(__rev0_354, __rev1_354, __noswap_splat_laneq_s32(__rev2_354, __p3_354)); \ - __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 1, 0); \ - __ret_354; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_laneq_s16(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \ - int16x4_t __ret_355; \ - int16x4_t __s0_355 = __p0_355; \ - int16x4_t __s1_355 = __p1_355; \ - int16x8_t __s2_355 = __p2_355; \ - __ret_355 = vqrdmlsh_s16(__s0_355, __s1_355, splat_laneq_s16(__s2_355, __p3_355)); \ - __ret_355; \ -}) -#else -#define vqrdmlsh_laneq_s16(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \ - int16x4_t __ret_356; \ - int16x4_t __s0_356 = __p0_356; \ - int16x4_t __s1_356 = __p1_356; \ - int16x8_t __s2_356 = __p2_356; \ - int16x4_t __rev0_356; __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 3, 2, 1, 0); \ - int16x4_t __rev1_356; __rev1_356 = __builtin_shufflevector(__s1_356, __s1_356, 3, 2, 1, 0); \ - int16x8_t __rev2_356; __rev2_356 = __builtin_shufflevector(__s2_356, __s2_356, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_356 = __noswap_vqrdmlsh_s16(__rev0_356, __rev1_356, __noswap_splat_laneq_s16(__rev2_356, __p3_356)); \ - __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 3, 2, 1, 0); \ - __ret_356; \ -}) -#endif - #endif #if defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ @@ -50186,892 +44147,892 @@ __ai float64x2_t 
vcombine_f64(float64x1_t __p0, float64x1_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p8(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \ - poly8x16_t __ret_357; \ - poly8x16_t __s0_357 = __p0_357; \ - poly8x8_t __s2_357 = __p2_357; \ - __ret_357 = vsetq_lane_p8(vget_lane_p8(__s2_357, __p3_357), __s0_357, __p1_357); \ - __ret_357; \ +#define vcopyq_lane_p8(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ + poly8x16_t __ret_257; \ + poly8x16_t __s0_257 = __p0_257; \ + poly8x8_t __s2_257 = __p2_257; \ + __ret_257 = vsetq_lane_p8(vget_lane_p8(__s2_257, __p3_257), __s0_257, __p1_257); \ + __ret_257; \ }) #else -#define vcopyq_lane_p8(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \ - poly8x16_t __ret_358; \ - poly8x16_t __s0_358 = __p0_358; \ - poly8x8_t __s2_358 = __p2_358; \ - poly8x16_t __rev0_358; __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev2_358; __rev2_358 = __builtin_shufflevector(__s2_358, __s2_358, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_358 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_358, __p3_358), __rev0_358, __p1_358); \ - __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_358; \ +#define vcopyq_lane_p8(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \ + poly8x16_t __ret_258; \ + poly8x16_t __s0_258 = __p0_258; \ + poly8x8_t __s2_258 = __p2_258; \ + poly8x16_t __rev0_258; __rev0_258 = __builtin_shufflevector(__s0_258, __s0_258, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_258; __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_258 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_258, __p3_258), __rev0_258, __p1_258); \ + __ret_258 = __builtin_shufflevector(__ret_258, __ret_258, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_258; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p16(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \ - poly16x8_t __ret_359; \ - poly16x8_t __s0_359 = __p0_359; \ - poly16x4_t __s2_359 = __p2_359; \ - __ret_359 = vsetq_lane_p16(vget_lane_p16(__s2_359, __p3_359), __s0_359, __p1_359); \ - __ret_359; \ +#define vcopyq_lane_p16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \ + poly16x8_t __ret_259; \ + poly16x8_t __s0_259 = __p0_259; \ + poly16x4_t __s2_259 = __p2_259; \ + __ret_259 = vsetq_lane_p16(vget_lane_p16(__s2_259, __p3_259), __s0_259, __p1_259); \ + __ret_259; \ }) #else -#define vcopyq_lane_p16(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \ - poly16x8_t __ret_360; \ - poly16x8_t __s0_360 = __p0_360; \ - poly16x4_t __s2_360 = __p2_360; \ - poly16x8_t __rev0_360; __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x4_t __rev2_360; __rev2_360 = __builtin_shufflevector(__s2_360, __s2_360, 3, 2, 1, 0); \ - __ret_360 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_360, __p3_360), __rev0_360, __p1_360); \ - __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_360; \ +#define vcopyq_lane_p16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \ + poly16x8_t __ret_260; \ + poly16x8_t __s0_260 = __p0_260; \ + poly16x4_t __s2_260 = __p2_260; \ + poly16x8_t __rev0_260; __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x4_t __rev2_260; __rev2_260 = 
__builtin_shufflevector(__s2_260, __s2_260, 3, 2, 1, 0); \ + __ret_260 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_260, __p3_260), __rev0_260, __p1_260); \ + __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_260; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u8(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \ - uint8x16_t __ret_361; \ - uint8x16_t __s0_361 = __p0_361; \ - uint8x8_t __s2_361 = __p2_361; \ - __ret_361 = vsetq_lane_u8(vget_lane_u8(__s2_361, __p3_361), __s0_361, __p1_361); \ - __ret_361; \ +#define vcopyq_lane_u8(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \ + uint8x16_t __ret_261; \ + uint8x16_t __s0_261 = __p0_261; \ + uint8x8_t __s2_261 = __p2_261; \ + __ret_261 = vsetq_lane_u8(vget_lane_u8(__s2_261, __p3_261), __s0_261, __p1_261); \ + __ret_261; \ }) #else -#define vcopyq_lane_u8(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \ - uint8x16_t __ret_362; \ - uint8x16_t __s0_362 = __p0_362; \ - uint8x8_t __s2_362 = __p2_362; \ - uint8x16_t __rev0_362; __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_362; __rev2_362 = __builtin_shufflevector(__s2_362, __s2_362, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_362 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_362, __p3_362), __rev0_362, __p1_362); \ - __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_362; \ +#define vcopyq_lane_u8(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \ + uint8x16_t __ret_262; \ + uint8x16_t __s0_262 = __p0_262; \ + uint8x8_t __s2_262 = __p2_262; \ + uint8x16_t __rev0_262; __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_262; __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_262 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_262, __p3_262), __rev0_262, __p1_262); \ + __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_262; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u32(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \ - uint32x4_t __ret_363; \ - uint32x4_t __s0_363 = __p0_363; \ - uint32x2_t __s2_363 = __p2_363; \ - __ret_363 = vsetq_lane_u32(vget_lane_u32(__s2_363, __p3_363), __s0_363, __p1_363); \ - __ret_363; \ +#define vcopyq_lane_u32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \ + uint32x4_t __ret_263; \ + uint32x4_t __s0_263 = __p0_263; \ + uint32x2_t __s2_263 = __p2_263; \ + __ret_263 = vsetq_lane_u32(vget_lane_u32(__s2_263, __p3_263), __s0_263, __p1_263); \ + __ret_263; \ }) #else -#define vcopyq_lane_u32(__p0_364, __p1_364, __p2_364, __p3_364) __extension__ ({ \ - uint32x4_t __ret_364; \ - uint32x4_t __s0_364 = __p0_364; \ - uint32x2_t __s2_364 = __p2_364; \ - uint32x4_t __rev0_364; __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \ - uint32x2_t __rev2_364; __rev2_364 = __builtin_shufflevector(__s2_364, __s2_364, 1, 0); \ - __ret_364 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_364, __p3_364), __rev0_364, __p1_364); \ - __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \ - __ret_364; \ +#define vcopyq_lane_u32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \ + uint32x4_t __ret_264; \ + uint32x4_t __s0_264 = __p0_264; \ + uint32x2_t __s2_264 
= __p2_264; \ + uint32x4_t __rev0_264; __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \ + uint32x2_t __rev2_264; __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 1, 0); \ + __ret_264 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_264, __p3_264), __rev0_264, __p1_264); \ + __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \ + __ret_264; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u64(__p0_365, __p1_365, __p2_365, __p3_365) __extension__ ({ \ - uint64x2_t __ret_365; \ - uint64x2_t __s0_365 = __p0_365; \ - uint64x1_t __s2_365 = __p2_365; \ - __ret_365 = vsetq_lane_u64(vget_lane_u64(__s2_365, __p3_365), __s0_365, __p1_365); \ - __ret_365; \ +#define vcopyq_lane_u64(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \ + uint64x2_t __ret_265; \ + uint64x2_t __s0_265 = __p0_265; \ + uint64x1_t __s2_265 = __p2_265; \ + __ret_265 = vsetq_lane_u64(vget_lane_u64(__s2_265, __p3_265), __s0_265, __p1_265); \ + __ret_265; \ }) #else -#define vcopyq_lane_u64(__p0_366, __p1_366, __p2_366, __p3_366) __extension__ ({ \ - uint64x2_t __ret_366; \ - uint64x2_t __s0_366 = __p0_366; \ - uint64x1_t __s2_366 = __p2_366; \ - uint64x2_t __rev0_366; __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \ - __ret_366 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_366, __p3_366), __rev0_366, __p1_366); \ - __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \ - __ret_366; \ +#define vcopyq_lane_u64(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \ + uint64x2_t __ret_266; \ + uint64x2_t __s0_266 = __p0_266; \ + uint64x1_t __s2_266 = __p2_266; \ + uint64x2_t __rev0_266; __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \ + __ret_266 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_266, __p3_266), __rev0_266, __p1_266); \ + __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \ + __ret_266; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u16(__p0_367, __p1_367, __p2_367, __p3_367) __extension__ ({ \ - uint16x8_t __ret_367; \ - uint16x8_t __s0_367 = __p0_367; \ - uint16x4_t __s2_367 = __p2_367; \ - __ret_367 = vsetq_lane_u16(vget_lane_u16(__s2_367, __p3_367), __s0_367, __p1_367); \ - __ret_367; \ +#define vcopyq_lane_u16(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \ + uint16x8_t __ret_267; \ + uint16x8_t __s0_267 = __p0_267; \ + uint16x4_t __s2_267 = __p2_267; \ + __ret_267 = vsetq_lane_u16(vget_lane_u16(__s2_267, __p3_267), __s0_267, __p1_267); \ + __ret_267; \ }) #else -#define vcopyq_lane_u16(__p0_368, __p1_368, __p2_368, __p3_368) __extension__ ({ \ - uint16x8_t __ret_368; \ - uint16x8_t __s0_368 = __p0_368; \ - uint16x4_t __s2_368 = __p2_368; \ - uint16x8_t __rev0_368; __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_368; __rev2_368 = __builtin_shufflevector(__s2_368, __s2_368, 3, 2, 1, 0); \ - __ret_368 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_368, __p3_368), __rev0_368, __p1_368); \ - __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_368; \ +#define vcopyq_lane_u16(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \ + uint16x8_t __ret_268; \ + uint16x8_t __s0_268 = __p0_268; \ + uint16x4_t __s2_268 = __p2_268; \ + uint16x8_t __rev0_268; __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_268; __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 3, 2, 1, 0); \ + __ret_268 = 
__noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_268, __p3_268), __rev0_268, __p1_268); \ + __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_268; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s8(__p0_369, __p1_369, __p2_369, __p3_369) __extension__ ({ \ - int8x16_t __ret_369; \ - int8x16_t __s0_369 = __p0_369; \ - int8x8_t __s2_369 = __p2_369; \ - __ret_369 = vsetq_lane_s8(vget_lane_s8(__s2_369, __p3_369), __s0_369, __p1_369); \ - __ret_369; \ +#define vcopyq_lane_s8(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \ + int8x16_t __ret_269; \ + int8x16_t __s0_269 = __p0_269; \ + int8x8_t __s2_269 = __p2_269; \ + __ret_269 = vsetq_lane_s8(vget_lane_s8(__s2_269, __p3_269), __s0_269, __p1_269); \ + __ret_269; \ }) #else -#define vcopyq_lane_s8(__p0_370, __p1_370, __p2_370, __p3_370) __extension__ ({ \ - int8x16_t __ret_370; \ - int8x16_t __s0_370 = __p0_370; \ - int8x8_t __s2_370 = __p2_370; \ - int8x16_t __rev0_370; __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_370; __rev2_370 = __builtin_shufflevector(__s2_370, __s2_370, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_370 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_370, __p3_370), __rev0_370, __p1_370); \ - __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_370; \ +#define vcopyq_lane_s8(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ + int8x16_t __ret_270; \ + int8x16_t __s0_270 = __p0_270; \ + int8x8_t __s2_270 = __p2_270; \ + int8x16_t __rev0_270; __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_270; __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_270 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_270, __p3_270), __rev0_270, __p1_270); \ + __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_270; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_f32(__p0_371, __p1_371, __p2_371, __p3_371) __extension__ ({ \ - float32x4_t __ret_371; \ - float32x4_t __s0_371 = __p0_371; \ - float32x2_t __s2_371 = __p2_371; \ - __ret_371 = vsetq_lane_f32(vget_lane_f32(__s2_371, __p3_371), __s0_371, __p1_371); \ - __ret_371; \ +#define vcopyq_lane_f32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ + float32x4_t __ret_271; \ + float32x4_t __s0_271 = __p0_271; \ + float32x2_t __s2_271 = __p2_271; \ + __ret_271 = vsetq_lane_f32(vget_lane_f32(__s2_271, __p3_271), __s0_271, __p1_271); \ + __ret_271; \ }) #else -#define vcopyq_lane_f32(__p0_372, __p1_372, __p2_372, __p3_372) __extension__ ({ \ - float32x4_t __ret_372; \ - float32x4_t __s0_372 = __p0_372; \ - float32x2_t __s2_372 = __p2_372; \ - float32x4_t __rev0_372; __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 3, 2, 1, 0); \ - float32x2_t __rev2_372; __rev2_372 = __builtin_shufflevector(__s2_372, __s2_372, 1, 0); \ - __ret_372 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_372, __p3_372), __rev0_372, __p1_372); \ - __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 3, 2, 1, 0); \ - __ret_372; \ +#define vcopyq_lane_f32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ + float32x4_t __ret_272; \ + float32x4_t __s0_272 = __p0_272; \ + float32x2_t __s2_272 = __p2_272; \ + float32x4_t __rev0_272; __rev0_272 = 
__builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \ + float32x2_t __rev2_272; __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 1, 0); \ + __ret_272 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_272, __p3_272), __rev0_272, __p1_272); \ + __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \ + __ret_272; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s32(__p0_373, __p1_373, __p2_373, __p3_373) __extension__ ({ \ - int32x4_t __ret_373; \ - int32x4_t __s0_373 = __p0_373; \ - int32x2_t __s2_373 = __p2_373; \ - __ret_373 = vsetq_lane_s32(vget_lane_s32(__s2_373, __p3_373), __s0_373, __p1_373); \ - __ret_373; \ +#define vcopyq_lane_s32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ + int32x4_t __ret_273; \ + int32x4_t __s0_273 = __p0_273; \ + int32x2_t __s2_273 = __p2_273; \ + __ret_273 = vsetq_lane_s32(vget_lane_s32(__s2_273, __p3_273), __s0_273, __p1_273); \ + __ret_273; \ }) #else -#define vcopyq_lane_s32(__p0_374, __p1_374, __p2_374, __p3_374) __extension__ ({ \ - int32x4_t __ret_374; \ - int32x4_t __s0_374 = __p0_374; \ - int32x2_t __s2_374 = __p2_374; \ - int32x4_t __rev0_374; __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \ - int32x2_t __rev2_374; __rev2_374 = __builtin_shufflevector(__s2_374, __s2_374, 1, 0); \ - __ret_374 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_374, __p3_374), __rev0_374, __p1_374); \ - __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \ - __ret_374; \ +#define vcopyq_lane_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ + int32x4_t __ret_274; \ + int32x4_t __s0_274 = __p0_274; \ + int32x2_t __s2_274 = __p2_274; \ + int32x4_t __rev0_274; __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 3, 2, 1, 0); \ + int32x2_t __rev2_274; __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 1, 0); \ + __ret_274 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_274, __p3_274), __rev0_274, __p1_274); \ + __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 3, 2, 1, 0); \ + __ret_274; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s64(__p0_375, __p1_375, __p2_375, __p3_375) __extension__ ({ \ - int64x2_t __ret_375; \ - int64x2_t __s0_375 = __p0_375; \ - int64x1_t __s2_375 = __p2_375; \ - __ret_375 = vsetq_lane_s64(vget_lane_s64(__s2_375, __p3_375), __s0_375, __p1_375); \ - __ret_375; \ +#define vcopyq_lane_s64(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ + int64x2_t __ret_275; \ + int64x2_t __s0_275 = __p0_275; \ + int64x1_t __s2_275 = __p2_275; \ + __ret_275 = vsetq_lane_s64(vget_lane_s64(__s2_275, __p3_275), __s0_275, __p1_275); \ + __ret_275; \ }) #else -#define vcopyq_lane_s64(__p0_376, __p1_376, __p2_376, __p3_376) __extension__ ({ \ - int64x2_t __ret_376; \ - int64x2_t __s0_376 = __p0_376; \ - int64x1_t __s2_376 = __p2_376; \ - int64x2_t __rev0_376; __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 1, 0); \ - __ret_376 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_376, __p3_376), __rev0_376, __p1_376); \ - __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 1, 0); \ - __ret_376; \ +#define vcopyq_lane_s64(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ + int64x2_t __ret_276; \ + int64x2_t __s0_276 = __p0_276; \ + int64x1_t __s2_276 = __p2_276; \ + int64x2_t __rev0_276; __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \ + __ret_276 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_276, __p3_276), __rev0_276, __p1_276); \ + __ret_276 = 
__builtin_shufflevector(__ret_276, __ret_276, 1, 0); \ + __ret_276; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s16(__p0_377, __p1_377, __p2_377, __p3_377) __extension__ ({ \ - int16x8_t __ret_377; \ - int16x8_t __s0_377 = __p0_377; \ - int16x4_t __s2_377 = __p2_377; \ - __ret_377 = vsetq_lane_s16(vget_lane_s16(__s2_377, __p3_377), __s0_377, __p1_377); \ - __ret_377; \ +#define vcopyq_lane_s16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \ + int16x8_t __ret_277; \ + int16x8_t __s0_277 = __p0_277; \ + int16x4_t __s2_277 = __p2_277; \ + __ret_277 = vsetq_lane_s16(vget_lane_s16(__s2_277, __p3_277), __s0_277, __p1_277); \ + __ret_277; \ }) #else -#define vcopyq_lane_s16(__p0_378, __p1_378, __p2_378, __p3_378) __extension__ ({ \ - int16x8_t __ret_378; \ - int16x8_t __s0_378 = __p0_378; \ - int16x4_t __s2_378 = __p2_378; \ - int16x8_t __rev0_378; __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_378; __rev2_378 = __builtin_shufflevector(__s2_378, __s2_378, 3, 2, 1, 0); \ - __ret_378 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_378, __p3_378), __rev0_378, __p1_378); \ - __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_378; \ +#define vcopyq_lane_s16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \ + int16x8_t __ret_278; \ + int16x8_t __s0_278 = __p0_278; \ + int16x4_t __s2_278 = __p2_278; \ + int16x8_t __rev0_278; __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_278; __rev2_278 = __builtin_shufflevector(__s2_278, __s2_278, 3, 2, 1, 0); \ + __ret_278 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_278, __p3_278), __rev0_278, __p1_278); \ + __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_278; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_p8(__p0_379, __p1_379, __p2_379, __p3_379) __extension__ ({ \ - poly8x8_t __ret_379; \ - poly8x8_t __s0_379 = __p0_379; \ - poly8x8_t __s2_379 = __p2_379; \ - __ret_379 = vset_lane_p8(vget_lane_p8(__s2_379, __p3_379), __s0_379, __p1_379); \ - __ret_379; \ +#define vcopy_lane_p8(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \ + poly8x8_t __ret_279; \ + poly8x8_t __s0_279 = __p0_279; \ + poly8x8_t __s2_279 = __p2_279; \ + __ret_279 = vset_lane_p8(vget_lane_p8(__s2_279, __p3_279), __s0_279, __p1_279); \ + __ret_279; \ }) #else -#define vcopy_lane_p8(__p0_380, __p1_380, __p2_380, __p3_380) __extension__ ({ \ - poly8x8_t __ret_380; \ - poly8x8_t __s0_380 = __p0_380; \ - poly8x8_t __s2_380 = __p2_380; \ - poly8x8_t __rev0_380; __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev2_380; __rev2_380 = __builtin_shufflevector(__s2_380, __s2_380, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_380 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_380, __p3_380), __rev0_380, __p1_380); \ - __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_380; \ +#define vcopy_lane_p8(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \ + poly8x8_t __ret_280; \ + poly8x8_t __s0_280 = __p0_280; \ + poly8x8_t __s2_280 = __p2_280; \ + poly8x8_t __rev0_280; __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_280; __rev2_280 = __builtin_shufflevector(__s2_280, __s2_280, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_280 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_280, __p3_280), __rev0_280, 
__p1_280); \ + __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_280; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_p16(__p0_381, __p1_381, __p2_381, __p3_381) __extension__ ({ \ - poly16x4_t __ret_381; \ - poly16x4_t __s0_381 = __p0_381; \ - poly16x4_t __s2_381 = __p2_381; \ - __ret_381 = vset_lane_p16(vget_lane_p16(__s2_381, __p3_381), __s0_381, __p1_381); \ - __ret_381; \ +#define vcopy_lane_p16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ + poly16x4_t __ret_281; \ + poly16x4_t __s0_281 = __p0_281; \ + poly16x4_t __s2_281 = __p2_281; \ + __ret_281 = vset_lane_p16(vget_lane_p16(__s2_281, __p3_281), __s0_281, __p1_281); \ + __ret_281; \ }) #else -#define vcopy_lane_p16(__p0_382, __p1_382, __p2_382, __p3_382) __extension__ ({ \ - poly16x4_t __ret_382; \ - poly16x4_t __s0_382 = __p0_382; \ - poly16x4_t __s2_382 = __p2_382; \ - poly16x4_t __rev0_382; __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \ - poly16x4_t __rev2_382; __rev2_382 = __builtin_shufflevector(__s2_382, __s2_382, 3, 2, 1, 0); \ - __ret_382 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_382, __p3_382), __rev0_382, __p1_382); \ - __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 3, 2, 1, 0); \ - __ret_382; \ +#define vcopy_lane_p16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ + poly16x4_t __ret_282; \ + poly16x4_t __s0_282 = __p0_282; \ + poly16x4_t __s2_282 = __p2_282; \ + poly16x4_t __rev0_282; __rev0_282 = __builtin_shufflevector(__s0_282, __s0_282, 3, 2, 1, 0); \ + poly16x4_t __rev2_282; __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \ + __ret_282 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_282, __p3_282), __rev0_282, __p1_282); \ + __ret_282 = __builtin_shufflevector(__ret_282, __ret_282, 3, 2, 1, 0); \ + __ret_282; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u8(__p0_383, __p1_383, __p2_383, __p3_383) __extension__ ({ \ - uint8x8_t __ret_383; \ - uint8x8_t __s0_383 = __p0_383; \ - uint8x8_t __s2_383 = __p2_383; \ - __ret_383 = vset_lane_u8(vget_lane_u8(__s2_383, __p3_383), __s0_383, __p1_383); \ - __ret_383; \ +#define vcopy_lane_u8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ + uint8x8_t __ret_283; \ + uint8x8_t __s0_283 = __p0_283; \ + uint8x8_t __s2_283 = __p2_283; \ + __ret_283 = vset_lane_u8(vget_lane_u8(__s2_283, __p3_283), __s0_283, __p1_283); \ + __ret_283; \ }) #else -#define vcopy_lane_u8(__p0_384, __p1_384, __p2_384, __p3_384) __extension__ ({ \ - uint8x8_t __ret_384; \ - uint8x8_t __s0_384 = __p0_384; \ - uint8x8_t __s2_384 = __p2_384; \ - uint8x8_t __rev0_384; __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_384; __rev2_384 = __builtin_shufflevector(__s2_384, __s2_384, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_384 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_384, __p3_384), __rev0_384, __p1_384); \ - __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_384; \ +#define vcopy_lane_u8(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ + uint8x8_t __ret_284; \ + uint8x8_t __s0_284 = __p0_284; \ + uint8x8_t __s2_284 = __p2_284; \ + uint8x8_t __rev0_284; __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_284; __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_284 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_284, __p3_284), __rev0_284, 
__p1_284); \ + __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_284; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u32(__p0_385, __p1_385, __p2_385, __p3_385) __extension__ ({ \ - uint32x2_t __ret_385; \ - uint32x2_t __s0_385 = __p0_385; \ - uint32x2_t __s2_385 = __p2_385; \ - __ret_385 = vset_lane_u32(vget_lane_u32(__s2_385, __p3_385), __s0_385, __p1_385); \ - __ret_385; \ +#define vcopy_lane_u32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ + uint32x2_t __ret_285; \ + uint32x2_t __s0_285 = __p0_285; \ + uint32x2_t __s2_285 = __p2_285; \ + __ret_285 = vset_lane_u32(vget_lane_u32(__s2_285, __p3_285), __s0_285, __p1_285); \ + __ret_285; \ }) #else -#define vcopy_lane_u32(__p0_386, __p1_386, __p2_386, __p3_386) __extension__ ({ \ - uint32x2_t __ret_386; \ - uint32x2_t __s0_386 = __p0_386; \ - uint32x2_t __s2_386 = __p2_386; \ - uint32x2_t __rev0_386; __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 1, 0); \ - uint32x2_t __rev2_386; __rev2_386 = __builtin_shufflevector(__s2_386, __s2_386, 1, 0); \ - __ret_386 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_386, __p3_386), __rev0_386, __p1_386); \ - __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \ - __ret_386; \ +#define vcopy_lane_u32(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ + uint32x2_t __ret_286; \ + uint32x2_t __s0_286 = __p0_286; \ + uint32x2_t __s2_286 = __p2_286; \ + uint32x2_t __rev0_286; __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 1, 0); \ + uint32x2_t __rev2_286; __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 1, 0); \ + __ret_286 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_286, __p3_286), __rev0_286, __p1_286); \ + __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 1, 0); \ + __ret_286; \ }) #endif -#define vcopy_lane_u64(__p0_387, __p1_387, __p2_387, __p3_387) __extension__ ({ \ - uint64x1_t __ret_387; \ - uint64x1_t __s0_387 = __p0_387; \ - uint64x1_t __s2_387 = __p2_387; \ - __ret_387 = vset_lane_u64(vget_lane_u64(__s2_387, __p3_387), __s0_387, __p1_387); \ - __ret_387; \ +#define vcopy_lane_u64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ + uint64x1_t __ret_287; \ + uint64x1_t __s0_287 = __p0_287; \ + uint64x1_t __s2_287 = __p2_287; \ + __ret_287 = vset_lane_u64(vget_lane_u64(__s2_287, __p3_287), __s0_287, __p1_287); \ + __ret_287; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u16(__p0_388, __p1_388, __p2_388, __p3_388) __extension__ ({ \ - uint16x4_t __ret_388; \ - uint16x4_t __s0_388 = __p0_388; \ - uint16x4_t __s2_388 = __p2_388; \ - __ret_388 = vset_lane_u16(vget_lane_u16(__s2_388, __p3_388), __s0_388, __p1_388); \ - __ret_388; \ +#define vcopy_lane_u16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ + uint16x4_t __ret_288; \ + uint16x4_t __s0_288 = __p0_288; \ + uint16x4_t __s2_288 = __p2_288; \ + __ret_288 = vset_lane_u16(vget_lane_u16(__s2_288, __p3_288), __s0_288, __p1_288); \ + __ret_288; \ }) #else -#define vcopy_lane_u16(__p0_389, __p1_389, __p2_389, __p3_389) __extension__ ({ \ - uint16x4_t __ret_389; \ - uint16x4_t __s0_389 = __p0_389; \ - uint16x4_t __s2_389 = __p2_389; \ - uint16x4_t __rev0_389; __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, 3, 2, 1, 0); \ - uint16x4_t __rev2_389; __rev2_389 = __builtin_shufflevector(__s2_389, __s2_389, 3, 2, 1, 0); \ - __ret_389 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_389, __p3_389), __rev0_389, __p1_389); \ - __ret_389 = __builtin_shufflevector(__ret_389, __ret_389, 3, 
2, 1, 0); \ - __ret_389; \ +#define vcopy_lane_u16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ + uint16x4_t __ret_289; \ + uint16x4_t __s0_289 = __p0_289; \ + uint16x4_t __s2_289 = __p2_289; \ + uint16x4_t __rev0_289; __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 3, 2, 1, 0); \ + uint16x4_t __rev2_289; __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 3, 2, 1, 0); \ + __ret_289 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_289, __p3_289), __rev0_289, __p1_289); \ + __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 3, 2, 1, 0); \ + __ret_289; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s8(__p0_390, __p1_390, __p2_390, __p3_390) __extension__ ({ \ - int8x8_t __ret_390; \ - int8x8_t __s0_390 = __p0_390; \ - int8x8_t __s2_390 = __p2_390; \ - __ret_390 = vset_lane_s8(vget_lane_s8(__s2_390, __p3_390), __s0_390, __p1_390); \ - __ret_390; \ +#define vcopy_lane_s8(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ + int8x8_t __ret_290; \ + int8x8_t __s0_290 = __p0_290; \ + int8x8_t __s2_290 = __p2_290; \ + __ret_290 = vset_lane_s8(vget_lane_s8(__s2_290, __p3_290), __s0_290, __p1_290); \ + __ret_290; \ }) #else -#define vcopy_lane_s8(__p0_391, __p1_391, __p2_391, __p3_391) __extension__ ({ \ - int8x8_t __ret_391; \ - int8x8_t __s0_391 = __p0_391; \ - int8x8_t __s2_391 = __p2_391; \ - int8x8_t __rev0_391; __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_391; __rev2_391 = __builtin_shufflevector(__s2_391, __s2_391, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_391 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_391, __p3_391), __rev0_391, __p1_391); \ - __ret_391 = __builtin_shufflevector(__ret_391, __ret_391, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_391; \ +#define vcopy_lane_s8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ + int8x8_t __ret_291; \ + int8x8_t __s0_291 = __p0_291; \ + int8x8_t __s2_291 = __p2_291; \ + int8x8_t __rev0_291; __rev0_291 = __builtin_shufflevector(__s0_291, __s0_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_291; __rev2_291 = __builtin_shufflevector(__s2_291, __s2_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_291 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_291, __p3_291), __rev0_291, __p1_291); \ + __ret_291 = __builtin_shufflevector(__ret_291, __ret_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_291; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_f32(__p0_392, __p1_392, __p2_392, __p3_392) __extension__ ({ \ - float32x2_t __ret_392; \ - float32x2_t __s0_392 = __p0_392; \ - float32x2_t __s2_392 = __p2_392; \ - __ret_392 = vset_lane_f32(vget_lane_f32(__s2_392, __p3_392), __s0_392, __p1_392); \ - __ret_392; \ +#define vcopy_lane_f32(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ + float32x2_t __ret_292; \ + float32x2_t __s0_292 = __p0_292; \ + float32x2_t __s2_292 = __p2_292; \ + __ret_292 = vset_lane_f32(vget_lane_f32(__s2_292, __p3_292), __s0_292, __p1_292); \ + __ret_292; \ }) #else -#define vcopy_lane_f32(__p0_393, __p1_393, __p2_393, __p3_393) __extension__ ({ \ - float32x2_t __ret_393; \ - float32x2_t __s0_393 = __p0_393; \ - float32x2_t __s2_393 = __p2_393; \ - float32x2_t __rev0_393; __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, 1, 0); \ - float32x2_t __rev2_393; __rev2_393 = __builtin_shufflevector(__s2_393, __s2_393, 1, 0); \ - __ret_393 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_393, __p3_393), __rev0_393, __p1_393); \ - __ret_393 = __builtin_shufflevector(__ret_393, __ret_393, 1, 0); \ - __ret_393; 
\ +#define vcopy_lane_f32(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \ + float32x2_t __ret_293; \ + float32x2_t __s0_293 = __p0_293; \ + float32x2_t __s2_293 = __p2_293; \ + float32x2_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, 1, 0); \ + float32x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \ + __ret_293 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_293, __p3_293), __rev0_293, __p1_293); \ + __ret_293 = __builtin_shufflevector(__ret_293, __ret_293, 1, 0); \ + __ret_293; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s32(__p0_394, __p1_394, __p2_394, __p3_394) __extension__ ({ \ - int32x2_t __ret_394; \ - int32x2_t __s0_394 = __p0_394; \ - int32x2_t __s2_394 = __p2_394; \ - __ret_394 = vset_lane_s32(vget_lane_s32(__s2_394, __p3_394), __s0_394, __p1_394); \ - __ret_394; \ +#define vcopy_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \ + int32x2_t __ret_294; \ + int32x2_t __s0_294 = __p0_294; \ + int32x2_t __s2_294 = __p2_294; \ + __ret_294 = vset_lane_s32(vget_lane_s32(__s2_294, __p3_294), __s0_294, __p1_294); \ + __ret_294; \ }) #else -#define vcopy_lane_s32(__p0_395, __p1_395, __p2_395, __p3_395) __extension__ ({ \ - int32x2_t __ret_395; \ - int32x2_t __s0_395 = __p0_395; \ - int32x2_t __s2_395 = __p2_395; \ - int32x2_t __rev0_395; __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, 1, 0); \ - int32x2_t __rev2_395; __rev2_395 = __builtin_shufflevector(__s2_395, __s2_395, 1, 0); \ - __ret_395 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_395, __p3_395), __rev0_395, __p1_395); \ - __ret_395 = __builtin_shufflevector(__ret_395, __ret_395, 1, 0); \ - __ret_395; \ +#define vcopy_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \ + int32x2_t __ret_295; \ + int32x2_t __s0_295 = __p0_295; \ + int32x2_t __s2_295 = __p2_295; \ + int32x2_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, 1, 0); \ + int32x2_t __rev2_295; __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \ + __ret_295 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_295, __p3_295), __rev0_295, __p1_295); \ + __ret_295 = __builtin_shufflevector(__ret_295, __ret_295, 1, 0); \ + __ret_295; \ }) #endif -#define vcopy_lane_s64(__p0_396, __p1_396, __p2_396, __p3_396) __extension__ ({ \ - int64x1_t __ret_396; \ - int64x1_t __s0_396 = __p0_396; \ - int64x1_t __s2_396 = __p2_396; \ - __ret_396 = vset_lane_s64(vget_lane_s64(__s2_396, __p3_396), __s0_396, __p1_396); \ - __ret_396; \ +#define vcopy_lane_s64(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \ + int64x1_t __ret_296; \ + int64x1_t __s0_296 = __p0_296; \ + int64x1_t __s2_296 = __p2_296; \ + __ret_296 = vset_lane_s64(vget_lane_s64(__s2_296, __p3_296), __s0_296, __p1_296); \ + __ret_296; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s16(__p0_397, __p1_397, __p2_397, __p3_397) __extension__ ({ \ - int16x4_t __ret_397; \ - int16x4_t __s0_397 = __p0_397; \ - int16x4_t __s2_397 = __p2_397; \ - __ret_397 = vset_lane_s16(vget_lane_s16(__s2_397, __p3_397), __s0_397, __p1_397); \ - __ret_397; \ +#define vcopy_lane_s16(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \ + int16x4_t __ret_297; \ + int16x4_t __s0_297 = __p0_297; \ + int16x4_t __s2_297 = __p2_297; \ + __ret_297 = vset_lane_s16(vget_lane_s16(__s2_297, __p3_297), __s0_297, __p1_297); \ + __ret_297; \ }) #else -#define vcopy_lane_s16(__p0_398, __p1_398, __p2_398, __p3_398) __extension__ ({ \ - int16x4_t __ret_398; \ - int16x4_t 
__s0_398 = __p0_398; \ - int16x4_t __s2_398 = __p2_398; \ - int16x4_t __rev0_398; __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 3, 2, 1, 0); \ - int16x4_t __rev2_398; __rev2_398 = __builtin_shufflevector(__s2_398, __s2_398, 3, 2, 1, 0); \ - __ret_398 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_398, __p3_398), __rev0_398, __p1_398); \ - __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \ - __ret_398; \ +#define vcopy_lane_s16(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \ + int16x4_t __ret_298; \ + int16x4_t __s0_298 = __p0_298; \ + int16x4_t __s2_298 = __p2_298; \ + int16x4_t __rev0_298; __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 3, 2, 1, 0); \ + int16x4_t __rev2_298; __rev2_298 = __builtin_shufflevector(__s2_298, __s2_298, 3, 2, 1, 0); \ + __ret_298 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_298, __p3_298), __rev0_298, __p1_298); \ + __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 3, 2, 1, 0); \ + __ret_298; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p8(__p0_399, __p1_399, __p2_399, __p3_399) __extension__ ({ \ - poly8x16_t __ret_399; \ - poly8x16_t __s0_399 = __p0_399; \ - poly8x16_t __s2_399 = __p2_399; \ - __ret_399 = vsetq_lane_p8(vgetq_lane_p8(__s2_399, __p3_399), __s0_399, __p1_399); \ - __ret_399; \ +#define vcopyq_laneq_p8(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \ + poly8x16_t __ret_299; \ + poly8x16_t __s0_299 = __p0_299; \ + poly8x16_t __s2_299 = __p2_299; \ + __ret_299 = vsetq_lane_p8(vgetq_lane_p8(__s2_299, __p3_299), __s0_299, __p1_299); \ + __ret_299; \ }) #else -#define vcopyq_laneq_p8(__p0_400, __p1_400, __p2_400, __p3_400) __extension__ ({ \ - poly8x16_t __ret_400; \ - poly8x16_t __s0_400 = __p0_400; \ - poly8x16_t __s2_400 = __p2_400; \ - poly8x16_t __rev0_400; __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev2_400; __rev2_400 = __builtin_shufflevector(__s2_400, __s2_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_400 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_400, __p3_400), __rev0_400, __p1_400); \ - __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_400; \ +#define vcopyq_laneq_p8(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \ + poly8x16_t __ret_300; \ + poly8x16_t __s0_300 = __p0_300; \ + poly8x16_t __s2_300 = __p2_300; \ + poly8x16_t __rev0_300; __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_300; __rev2_300 = __builtin_shufflevector(__s2_300, __s2_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_300 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_300, __p3_300), __rev0_300, __p1_300); \ + __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_300; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p16(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \ - poly16x8_t __ret_401; \ - poly16x8_t __s0_401 = __p0_401; \ - poly16x8_t __s2_401 = __p2_401; \ - __ret_401 = vsetq_lane_p16(vgetq_lane_p16(__s2_401, __p3_401), __s0_401, __p1_401); \ - __ret_401; \ +#define vcopyq_laneq_p16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \ + poly16x8_t __ret_301; \ + poly16x8_t __s0_301 = __p0_301; \ + poly16x8_t __s2_301 = __p2_301; \ + __ret_301 = 
vsetq_lane_p16(vgetq_lane_p16(__s2_301, __p3_301), __s0_301, __p1_301); \ + __ret_301; \ }) #else -#define vcopyq_laneq_p16(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \ - poly16x8_t __ret_402; \ - poly16x8_t __s0_402 = __p0_402; \ - poly16x8_t __s2_402 = __p2_402; \ - poly16x8_t __rev0_402; __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __rev2_402; __rev2_402 = __builtin_shufflevector(__s2_402, __s2_402, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_402 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_402, __p3_402), __rev0_402, __p1_402); \ - __ret_402 = __builtin_shufflevector(__ret_402, __ret_402, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_402; \ +#define vcopyq_laneq_p16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \ + poly16x8_t __ret_302; \ + poly16x8_t __s0_302 = __p0_302; \ + poly16x8_t __s2_302 = __p2_302; \ + poly16x8_t __rev0_302; __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev2_302; __rev2_302 = __builtin_shufflevector(__s2_302, __s2_302, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_302 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_302, __p3_302), __rev0_302, __p1_302); \ + __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_302; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u8(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \ - uint8x16_t __ret_403; \ - uint8x16_t __s0_403 = __p0_403; \ - uint8x16_t __s2_403 = __p2_403; \ - __ret_403 = vsetq_lane_u8(vgetq_lane_u8(__s2_403, __p3_403), __s0_403, __p1_403); \ - __ret_403; \ +#define vcopyq_laneq_u8(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \ + uint8x16_t __ret_303; \ + uint8x16_t __s0_303 = __p0_303; \ + uint8x16_t __s2_303 = __p2_303; \ + __ret_303 = vsetq_lane_u8(vgetq_lane_u8(__s2_303, __p3_303), __s0_303, __p1_303); \ + __ret_303; \ }) #else -#define vcopyq_laneq_u8(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \ - uint8x16_t __ret_404; \ - uint8x16_t __s0_404 = __p0_404; \ - uint8x16_t __s2_404 = __p2_404; \ - uint8x16_t __rev0_404; __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_404; __rev2_404 = __builtin_shufflevector(__s2_404, __s2_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_404 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_404, __p3_404), __rev0_404, __p1_404); \ - __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_404; \ +#define vcopyq_laneq_u8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \ + uint8x16_t __ret_304; \ + uint8x16_t __s0_304 = __p0_304; \ + uint8x16_t __s2_304 = __p2_304; \ + uint8x16_t __rev0_304; __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_304; __rev2_304 = __builtin_shufflevector(__s2_304, __s2_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_304 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_304, __p3_304), __rev0_304, __p1_304); \ + __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_304; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u32(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \ - uint32x4_t __ret_405; \ - uint32x4_t __s0_405 = __p0_405; \ - uint32x4_t __s2_405 = __p2_405; \ - 
__ret_405 = vsetq_lane_u32(vgetq_lane_u32(__s2_405, __p3_405), __s0_405, __p1_405); \ - __ret_405; \ +#define vcopyq_laneq_u32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \ + uint32x4_t __ret_305; \ + uint32x4_t __s0_305 = __p0_305; \ + uint32x4_t __s2_305 = __p2_305; \ + __ret_305 = vsetq_lane_u32(vgetq_lane_u32(__s2_305, __p3_305), __s0_305, __p1_305); \ + __ret_305; \ }) #else -#define vcopyq_laneq_u32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \ - uint32x4_t __ret_406; \ - uint32x4_t __s0_406 = __p0_406; \ - uint32x4_t __s2_406 = __p2_406; \ - uint32x4_t __rev0_406; __rev0_406 = __builtin_shufflevector(__s0_406, __s0_406, 3, 2, 1, 0); \ - uint32x4_t __rev2_406; __rev2_406 = __builtin_shufflevector(__s2_406, __s2_406, 3, 2, 1, 0); \ - __ret_406 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_406, __p3_406), __rev0_406, __p1_406); \ - __ret_406 = __builtin_shufflevector(__ret_406, __ret_406, 3, 2, 1, 0); \ - __ret_406; \ +#define vcopyq_laneq_u32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ + uint32x4_t __ret_306; \ + uint32x4_t __s0_306 = __p0_306; \ + uint32x4_t __s2_306 = __p2_306; \ + uint32x4_t __rev0_306; __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \ + uint32x4_t __rev2_306; __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 3, 2, 1, 0); \ + __ret_306 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_306, __p3_306), __rev0_306, __p1_306); \ + __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \ + __ret_306; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u64(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \ - uint64x2_t __ret_407; \ - uint64x2_t __s0_407 = __p0_407; \ - uint64x2_t __s2_407 = __p2_407; \ - __ret_407 = vsetq_lane_u64(vgetq_lane_u64(__s2_407, __p3_407), __s0_407, __p1_407); \ - __ret_407; \ +#define vcopyq_laneq_u64(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ + uint64x2_t __ret_307; \ + uint64x2_t __s0_307 = __p0_307; \ + uint64x2_t __s2_307 = __p2_307; \ + __ret_307 = vsetq_lane_u64(vgetq_lane_u64(__s2_307, __p3_307), __s0_307, __p1_307); \ + __ret_307; \ }) #else -#define vcopyq_laneq_u64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \ - uint64x2_t __ret_408; \ - uint64x2_t __s0_408 = __p0_408; \ - uint64x2_t __s2_408 = __p2_408; \ - uint64x2_t __rev0_408; __rev0_408 = __builtin_shufflevector(__s0_408, __s0_408, 1, 0); \ - uint64x2_t __rev2_408; __rev2_408 = __builtin_shufflevector(__s2_408, __s2_408, 1, 0); \ - __ret_408 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_408, __p3_408), __rev0_408, __p1_408); \ - __ret_408 = __builtin_shufflevector(__ret_408, __ret_408, 1, 0); \ - __ret_408; \ +#define vcopyq_laneq_u64(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ + uint64x2_t __ret_308; \ + uint64x2_t __s0_308 = __p0_308; \ + uint64x2_t __s2_308 = __p2_308; \ + uint64x2_t __rev0_308; __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \ + uint64x2_t __rev2_308; __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 1, 0); \ + __ret_308 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_308, __p3_308), __rev0_308, __p1_308); \ + __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \ + __ret_308; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u16(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \ - uint16x8_t __ret_409; \ - uint16x8_t __s0_409 = __p0_409; \ - uint16x8_t __s2_409 = __p2_409; \ - __ret_409 = 
vsetq_lane_u16(vgetq_lane_u16(__s2_409, __p3_409), __s0_409, __p1_409); \ - __ret_409; \ +#define vcopyq_laneq_u16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ + uint16x8_t __ret_309; \ + uint16x8_t __s0_309 = __p0_309; \ + uint16x8_t __s2_309 = __p2_309; \ + __ret_309 = vsetq_lane_u16(vgetq_lane_u16(__s2_309, __p3_309), __s0_309, __p1_309); \ + __ret_309; \ }) #else -#define vcopyq_laneq_u16(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \ - uint16x8_t __ret_410; \ - uint16x8_t __s0_410 = __p0_410; \ - uint16x8_t __s2_410 = __p2_410; \ - uint16x8_t __rev0_410; __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_410; __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_410 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_410, __p3_410), __rev0_410, __p1_410); \ - __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_410; \ +#define vcopyq_laneq_u16(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ + uint16x8_t __ret_310; \ + uint16x8_t __s0_310 = __p0_310; \ + uint16x8_t __s2_310 = __p2_310; \ + uint16x8_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_310 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_310, __p3_310), __rev0_310, __p1_310); \ + __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_310; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s8(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \ - int8x16_t __ret_411; \ - int8x16_t __s0_411 = __p0_411; \ - int8x16_t __s2_411 = __p2_411; \ - __ret_411 = vsetq_lane_s8(vgetq_lane_s8(__s2_411, __p3_411), __s0_411, __p1_411); \ - __ret_411; \ +#define vcopyq_laneq_s8(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ + int8x16_t __ret_311; \ + int8x16_t __s0_311 = __p0_311; \ + int8x16_t __s2_311 = __p2_311; \ + __ret_311 = vsetq_lane_s8(vgetq_lane_s8(__s2_311, __p3_311), __s0_311, __p1_311); \ + __ret_311; \ }) #else -#define vcopyq_laneq_s8(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \ - int8x16_t __ret_412; \ - int8x16_t __s0_412 = __p0_412; \ - int8x16_t __s2_412 = __p2_412; \ - int8x16_t __rev0_412; __rev0_412 = __builtin_shufflevector(__s0_412, __s0_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_412; __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_412 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_412, __p3_412), __rev0_412, __p1_412); \ - __ret_412 = __builtin_shufflevector(__ret_412, __ret_412, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_412; \ +#define vcopyq_laneq_s8(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ + int8x16_t __ret_312; \ + int8x16_t __s0_312 = __p0_312; \ + int8x16_t __s2_312 = __p2_312; \ + int8x16_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_312, __p3_312), __rev0_312, __p1_312); \ + __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \ - float32x4_t __ret_413; \ - float32x4_t __s0_413 = __p0_413; \ - float32x4_t __s2_413 = __p2_413; \ - __ret_413 = vsetq_lane_f32(vgetq_lane_f32(__s2_413, __p3_413), __s0_413, __p1_413); \ - __ret_413; \ +#define vcopyq_laneq_f32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ + float32x4_t __ret_313; \ + float32x4_t __s0_313 = __p0_313; \ + float32x4_t __s2_313 = __p2_313; \ + __ret_313 = vsetq_lane_f32(vgetq_lane_f32(__s2_313, __p3_313), __s0_313, __p1_313); \ + __ret_313; \ }) #else -#define vcopyq_laneq_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \ - float32x4_t __ret_414; \ - float32x4_t __s0_414 = __p0_414; \ - float32x4_t __s2_414 = __p2_414; \ - float32x4_t __rev0_414; __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 3, 2, 1, 0); \ - float32x4_t __rev2_414; __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 3, 2, 1, 0); \ - __ret_414 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_414, __p3_414), __rev0_414, __p1_414); \ - __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 3, 2, 1, 0); \ - __ret_414; \ +#define vcopyq_laneq_f32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ + float32x4_t __ret_314; \ + float32x4_t __s0_314 = __p0_314; \ + float32x4_t __s2_314 = __p2_314; \ + float32x4_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 3, 2, 1, 0); \ + float32x4_t __rev2_314; __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 3, 2, 1, 0); \ + __ret_314 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_314, __p3_314), __rev0_314, __p1_314); \ + __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 3, 2, 1, 0); \ + __ret_314; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s32(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \ - int32x4_t __ret_415; \ - int32x4_t __s0_415 = __p0_415; \ - int32x4_t __s2_415 = __p2_415; \ - __ret_415 = vsetq_lane_s32(vgetq_lane_s32(__s2_415, __p3_415), __s0_415, __p1_415); \ - __ret_415; \ +#define vcopyq_laneq_s32(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ + int32x4_t __ret_315; \ + int32x4_t __s0_315 = __p0_315; \ + int32x4_t __s2_315 = __p2_315; \ + __ret_315 = vsetq_lane_s32(vgetq_lane_s32(__s2_315, __p3_315), __s0_315, __p1_315); \ + __ret_315; \ }) #else -#define vcopyq_laneq_s32(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \ - int32x4_t __ret_416; \ - int32x4_t __s0_416 = __p0_416; \ - int32x4_t __s2_416 = __p2_416; \ - int32x4_t __rev0_416; __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 3, 2, 1, 0); \ - int32x4_t __rev2_416; __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 3, 2, 1, 0); \ - __ret_416 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_416, __p3_416), __rev0_416, __p1_416); \ - __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 3, 2, 1, 0); \ - __ret_416; \ +#define vcopyq_laneq_s32(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ + int32x4_t __ret_316; \ + int32x4_t __s0_316 = __p0_316; \ + int32x4_t __s2_316 = __p2_316; \ + int32x4_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \ + int32x4_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \ + __ret_316 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_316, __p3_316), __rev0_316, __p1_316); \ + __ret_316 = __builtin_shufflevector(__ret_316, 
__ret_316, 3, 2, 1, 0); \ + __ret_316; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s64(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \ - int64x2_t __ret_417; \ - int64x2_t __s0_417 = __p0_417; \ - int64x2_t __s2_417 = __p2_417; \ - __ret_417 = vsetq_lane_s64(vgetq_lane_s64(__s2_417, __p3_417), __s0_417, __p1_417); \ - __ret_417; \ +#define vcopyq_laneq_s64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ + int64x2_t __ret_317; \ + int64x2_t __s0_317 = __p0_317; \ + int64x2_t __s2_317 = __p2_317; \ + __ret_317 = vsetq_lane_s64(vgetq_lane_s64(__s2_317, __p3_317), __s0_317, __p1_317); \ + __ret_317; \ }) #else -#define vcopyq_laneq_s64(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \ - int64x2_t __ret_418; \ - int64x2_t __s0_418 = __p0_418; \ - int64x2_t __s2_418 = __p2_418; \ - int64x2_t __rev0_418; __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 1, 0); \ - int64x2_t __rev2_418; __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 1, 0); \ - __ret_418 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_418, __p3_418), __rev0_418, __p1_418); \ - __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 1, 0); \ - __ret_418; \ +#define vcopyq_laneq_s64(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ + int64x2_t __ret_318; \ + int64x2_t __s0_318 = __p0_318; \ + int64x2_t __s2_318 = __p2_318; \ + int64x2_t __rev0_318; __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 1, 0); \ + int64x2_t __rev2_318; __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \ + __ret_318 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_318, __p3_318), __rev0_318, __p1_318); \ + __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 1, 0); \ + __ret_318; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s16(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \ - int16x8_t __ret_419; \ - int16x8_t __s0_419 = __p0_419; \ - int16x8_t __s2_419 = __p2_419; \ - __ret_419 = vsetq_lane_s16(vgetq_lane_s16(__s2_419, __p3_419), __s0_419, __p1_419); \ - __ret_419; \ +#define vcopyq_laneq_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ + int16x8_t __ret_319; \ + int16x8_t __s0_319 = __p0_319; \ + int16x8_t __s2_319 = __p2_319; \ + __ret_319 = vsetq_lane_s16(vgetq_lane_s16(__s2_319, __p3_319), __s0_319, __p1_319); \ + __ret_319; \ }) #else -#define vcopyq_laneq_s16(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \ - int16x8_t __ret_420; \ - int16x8_t __s0_420 = __p0_420; \ - int16x8_t __s2_420 = __p2_420; \ - int16x8_t __rev0_420; __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_420; __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_420 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_420, __p3_420), __rev0_420, __p1_420); \ - __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_420; \ +#define vcopyq_laneq_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ + int16x8_t __ret_320; \ + int16x8_t __s0_320 = __p0_320; \ + int16x8_t __s2_320 = __p2_320; \ + int16x8_t __rev0_320; __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_320; __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_320 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_320, __p3_320), __rev0_320, __p1_320); \ + __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 
3, 2, 1, 0); \ + __ret_320; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p8(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \ - poly8x8_t __ret_421; \ - poly8x8_t __s0_421 = __p0_421; \ - poly8x16_t __s2_421 = __p2_421; \ - __ret_421 = vset_lane_p8(vgetq_lane_p8(__s2_421, __p3_421), __s0_421, __p1_421); \ - __ret_421; \ +#define vcopy_laneq_p8(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ + poly8x8_t __ret_321; \ + poly8x8_t __s0_321 = __p0_321; \ + poly8x16_t __s2_321 = __p2_321; \ + __ret_321 = vset_lane_p8(vgetq_lane_p8(__s2_321, __p3_321), __s0_321, __p1_321); \ + __ret_321; \ }) #else -#define vcopy_laneq_p8(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \ - poly8x8_t __ret_422; \ - poly8x8_t __s0_422 = __p0_422; \ - poly8x16_t __s2_422 = __p2_422; \ - poly8x8_t __rev0_422; __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev2_422; __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_422 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_422, __p3_422), __rev0_422, __p1_422); \ - __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_422; \ +#define vcopy_laneq_p8(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ + poly8x8_t __ret_322; \ + poly8x8_t __s0_322 = __p0_322; \ + poly8x16_t __s2_322 = __p2_322; \ + poly8x8_t __rev0_322; __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_322; __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_322 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_322, __p3_322), __rev0_322, __p1_322); \ + __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_322; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p16(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \ - poly16x4_t __ret_423; \ - poly16x4_t __s0_423 = __p0_423; \ - poly16x8_t __s2_423 = __p2_423; \ - __ret_423 = vset_lane_p16(vgetq_lane_p16(__s2_423, __p3_423), __s0_423, __p1_423); \ - __ret_423; \ +#define vcopy_laneq_p16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ + poly16x4_t __ret_323; \ + poly16x4_t __s0_323 = __p0_323; \ + poly16x8_t __s2_323 = __p2_323; \ + __ret_323 = vset_lane_p16(vgetq_lane_p16(__s2_323, __p3_323), __s0_323, __p1_323); \ + __ret_323; \ }) #else -#define vcopy_laneq_p16(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \ - poly16x4_t __ret_424; \ - poly16x4_t __s0_424 = __p0_424; \ - poly16x8_t __s2_424 = __p2_424; \ - poly16x4_t __rev0_424; __rev0_424 = __builtin_shufflevector(__s0_424, __s0_424, 3, 2, 1, 0); \ - poly16x8_t __rev2_424; __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_424 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_424, __p3_424), __rev0_424, __p1_424); \ - __ret_424 = __builtin_shufflevector(__ret_424, __ret_424, 3, 2, 1, 0); \ - __ret_424; \ +#define vcopy_laneq_p16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ + poly16x4_t __ret_324; \ + poly16x4_t __s0_324 = __p0_324; \ + poly16x8_t __s2_324 = __p2_324; \ + poly16x4_t __rev0_324; __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \ + poly16x8_t __rev2_324; __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_324 = 
__noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_324, __p3_324), __rev0_324, __p1_324); \ + __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \ + __ret_324; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u8(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \ - uint8x8_t __ret_425; \ - uint8x8_t __s0_425 = __p0_425; \ - uint8x16_t __s2_425 = __p2_425; \ - __ret_425 = vset_lane_u8(vgetq_lane_u8(__s2_425, __p3_425), __s0_425, __p1_425); \ - __ret_425; \ +#define vcopy_laneq_u8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ + uint8x8_t __ret_325; \ + uint8x8_t __s0_325 = __p0_325; \ + uint8x16_t __s2_325 = __p2_325; \ + __ret_325 = vset_lane_u8(vgetq_lane_u8(__s2_325, __p3_325), __s0_325, __p1_325); \ + __ret_325; \ }) #else -#define vcopy_laneq_u8(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \ - uint8x8_t __ret_426; \ - uint8x8_t __s0_426 = __p0_426; \ - uint8x16_t __s2_426 = __p2_426; \ - uint8x8_t __rev0_426; __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_426; __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_426 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_426, __p3_426), __rev0_426, __p1_426); \ - __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_426; \ +#define vcopy_laneq_u8(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ + uint8x8_t __ret_326; \ + uint8x8_t __s0_326 = __p0_326; \ + uint8x16_t __s2_326 = __p2_326; \ + uint8x8_t __rev0_326; __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_326; __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_326 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_326, __p3_326), __rev0_326, __p1_326); \ + __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_326; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \ - uint32x2_t __ret_427; \ - uint32x2_t __s0_427 = __p0_427; \ - uint32x4_t __s2_427 = __p2_427; \ - __ret_427 = vset_lane_u32(vgetq_lane_u32(__s2_427, __p3_427), __s0_427, __p1_427); \ - __ret_427; \ +#define vcopy_laneq_u32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ + uint32x2_t __ret_327; \ + uint32x2_t __s0_327 = __p0_327; \ + uint32x4_t __s2_327 = __p2_327; \ + __ret_327 = vset_lane_u32(vgetq_lane_u32(__s2_327, __p3_327), __s0_327, __p1_327); \ + __ret_327; \ }) #else -#define vcopy_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \ - uint32x2_t __ret_428; \ - uint32x2_t __s0_428 = __p0_428; \ - uint32x4_t __s2_428 = __p2_428; \ - uint32x2_t __rev0_428; __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 1, 0); \ - uint32x4_t __rev2_428; __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \ - __ret_428 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_428, __p3_428), __rev0_428, __p1_428); \ - __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 1, 0); \ - __ret_428; \ +#define vcopy_laneq_u32(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ + uint32x2_t __ret_328; \ + uint32x2_t __s0_328 = __p0_328; \ + uint32x4_t __s2_328 = __p2_328; \ + uint32x2_t __rev0_328; __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 1, 0); \ + uint32x4_t __rev2_328; __rev2_328 = 
__builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \ + __ret_328 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_328, __p3_328), __rev0_328, __p1_328); \ + __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 1, 0); \ + __ret_328; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u64(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \ - uint64x1_t __ret_429; \ - uint64x1_t __s0_429 = __p0_429; \ - uint64x2_t __s2_429 = __p2_429; \ - __ret_429 = vset_lane_u64(vgetq_lane_u64(__s2_429, __p3_429), __s0_429, __p1_429); \ - __ret_429; \ +#define vcopy_laneq_u64(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ + uint64x1_t __ret_329; \ + uint64x1_t __s0_329 = __p0_329; \ + uint64x2_t __s2_329 = __p2_329; \ + __ret_329 = vset_lane_u64(vgetq_lane_u64(__s2_329, __p3_329), __s0_329, __p1_329); \ + __ret_329; \ }) #else -#define vcopy_laneq_u64(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \ - uint64x1_t __ret_430; \ - uint64x1_t __s0_430 = __p0_430; \ - uint64x2_t __s2_430 = __p2_430; \ - uint64x2_t __rev2_430; __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 1, 0); \ - __ret_430 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_430, __p3_430), __s0_430, __p1_430); \ - __ret_430; \ +#define vcopy_laneq_u64(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \ + uint64x1_t __ret_330; \ + uint64x1_t __s0_330 = __p0_330; \ + uint64x2_t __s2_330 = __p2_330; \ + uint64x2_t __rev2_330; __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 1, 0); \ + __ret_330 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_330, __p3_330), __s0_330, __p1_330); \ + __ret_330; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u16(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \ - uint16x4_t __ret_431; \ - uint16x4_t __s0_431 = __p0_431; \ - uint16x8_t __s2_431 = __p2_431; \ - __ret_431 = vset_lane_u16(vgetq_lane_u16(__s2_431, __p3_431), __s0_431, __p1_431); \ - __ret_431; \ +#define vcopy_laneq_u16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \ + uint16x4_t __ret_331; \ + uint16x4_t __s0_331 = __p0_331; \ + uint16x8_t __s2_331 = __p2_331; \ + __ret_331 = vset_lane_u16(vgetq_lane_u16(__s2_331, __p3_331), __s0_331, __p1_331); \ + __ret_331; \ }) #else -#define vcopy_laneq_u16(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \ - uint16x4_t __ret_432; \ - uint16x4_t __s0_432 = __p0_432; \ - uint16x8_t __s2_432 = __p2_432; \ - uint16x4_t __rev0_432; __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \ - uint16x8_t __rev2_432; __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_432 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_432, __p3_432), __rev0_432, __p1_432); \ - __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \ - __ret_432; \ +#define vcopy_laneq_u16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ + uint16x4_t __ret_332; \ + uint16x4_t __s0_332 = __p0_332; \ + uint16x8_t __s2_332 = __p2_332; \ + uint16x4_t __rev0_332; __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \ + uint16x8_t __rev2_332; __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_332 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_332, __p3_332), __rev0_332, __p1_332); \ + __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \ + __ret_332; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s8(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \ - int8x8_t 
__ret_433; \ - int8x8_t __s0_433 = __p0_433; \ - int8x16_t __s2_433 = __p2_433; \ - __ret_433 = vset_lane_s8(vgetq_lane_s8(__s2_433, __p3_433), __s0_433, __p1_433); \ - __ret_433; \ +#define vcopy_laneq_s8(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ + int8x8_t __ret_333; \ + int8x8_t __s0_333 = __p0_333; \ + int8x16_t __s2_333 = __p2_333; \ + __ret_333 = vset_lane_s8(vgetq_lane_s8(__s2_333, __p3_333), __s0_333, __p1_333); \ + __ret_333; \ }) #else -#define vcopy_laneq_s8(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \ - int8x8_t __ret_434; \ - int8x8_t __s0_434 = __p0_434; \ - int8x16_t __s2_434 = __p2_434; \ - int8x8_t __rev0_434; __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_434; __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_434 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_434, __p3_434), __rev0_434, __p1_434); \ - __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_434; \ +#define vcopy_laneq_s8(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ + int8x8_t __ret_334; \ + int8x8_t __s0_334 = __p0_334; \ + int8x16_t __s2_334 = __p2_334; \ + int8x8_t __rev0_334; __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_334; __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_334 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_334, __p3_334), __rev0_334, __p1_334); \ + __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_334; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_f32(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \ - float32x2_t __ret_435; \ - float32x2_t __s0_435 = __p0_435; \ - float32x4_t __s2_435 = __p2_435; \ - __ret_435 = vset_lane_f32(vgetq_lane_f32(__s2_435, __p3_435), __s0_435, __p1_435); \ - __ret_435; \ +#define vcopy_laneq_f32(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ + float32x2_t __ret_335; \ + float32x2_t __s0_335 = __p0_335; \ + float32x4_t __s2_335 = __p2_335; \ + __ret_335 = vset_lane_f32(vgetq_lane_f32(__s2_335, __p3_335), __s0_335, __p1_335); \ + __ret_335; \ }) #else -#define vcopy_laneq_f32(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \ - float32x2_t __ret_436; \ - float32x2_t __s0_436 = __p0_436; \ - float32x4_t __s2_436 = __p2_436; \ - float32x2_t __rev0_436; __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 1, 0); \ - float32x4_t __rev2_436; __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 3, 2, 1, 0); \ - __ret_436 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_436, __p3_436), __rev0_436, __p1_436); \ - __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 1, 0); \ - __ret_436; \ +#define vcopy_laneq_f32(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ + float32x2_t __ret_336; \ + float32x2_t __s0_336 = __p0_336; \ + float32x4_t __s2_336 = __p2_336; \ + float32x2_t __rev0_336; __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 1, 0); \ + float32x4_t __rev2_336; __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 3, 2, 1, 0); \ + __ret_336 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_336, __p3_336), __rev0_336, __p1_336); \ + __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 1, 0); \ + __ret_336; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s32(__p0_437, 
__p1_437, __p2_437, __p3_437) __extension__ ({ \ - int32x2_t __ret_437; \ - int32x2_t __s0_437 = __p0_437; \ - int32x4_t __s2_437 = __p2_437; \ - __ret_437 = vset_lane_s32(vgetq_lane_s32(__s2_437, __p3_437), __s0_437, __p1_437); \ - __ret_437; \ +#define vcopy_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ + int32x2_t __ret_337; \ + int32x2_t __s0_337 = __p0_337; \ + int32x4_t __s2_337 = __p2_337; \ + __ret_337 = vset_lane_s32(vgetq_lane_s32(__s2_337, __p3_337), __s0_337, __p1_337); \ + __ret_337; \ }) #else -#define vcopy_laneq_s32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \ - int32x2_t __ret_438; \ - int32x2_t __s0_438 = __p0_438; \ - int32x4_t __s2_438 = __p2_438; \ - int32x2_t __rev0_438; __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \ - int32x4_t __rev2_438; __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \ - __ret_438 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_438, __p3_438), __rev0_438, __p1_438); \ - __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \ - __ret_438; \ +#define vcopy_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ + int32x2_t __ret_338; \ + int32x2_t __s0_338 = __p0_338; \ + int32x4_t __s2_338 = __p2_338; \ + int32x2_t __rev0_338; __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \ + int32x4_t __rev2_338; __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \ + __ret_338 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_338, __p3_338), __rev0_338, __p1_338); \ + __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \ + __ret_338; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s64(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \ - int64x1_t __ret_439; \ - int64x1_t __s0_439 = __p0_439; \ - int64x2_t __s2_439 = __p2_439; \ - __ret_439 = vset_lane_s64(vgetq_lane_s64(__s2_439, __p3_439), __s0_439, __p1_439); \ - __ret_439; \ +#define vcopy_laneq_s64(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \ + int64x1_t __ret_339; \ + int64x1_t __s0_339 = __p0_339; \ + int64x2_t __s2_339 = __p2_339; \ + __ret_339 = vset_lane_s64(vgetq_lane_s64(__s2_339, __p3_339), __s0_339, __p1_339); \ + __ret_339; \ }) #else -#define vcopy_laneq_s64(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \ - int64x1_t __ret_440; \ - int64x1_t __s0_440 = __p0_440; \ - int64x2_t __s2_440 = __p2_440; \ - int64x2_t __rev2_440; __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 1, 0); \ - __ret_440 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_440, __p3_440), __s0_440, __p1_440); \ - __ret_440; \ +#define vcopy_laneq_s64(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ + int64x1_t __ret_340; \ + int64x1_t __s0_340 = __p0_340; \ + int64x2_t __s2_340 = __p2_340; \ + int64x2_t __rev2_340; __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 1, 0); \ + __ret_340 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_340, __p3_340), __s0_340, __p1_340); \ + __ret_340; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s16(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \ - int16x4_t __ret_441; \ - int16x4_t __s0_441 = __p0_441; \ - int16x8_t __s2_441 = __p2_441; \ - __ret_441 = vset_lane_s16(vgetq_lane_s16(__s2_441, __p3_441), __s0_441, __p1_441); \ - __ret_441; \ +#define vcopy_laneq_s16(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \ + int16x4_t __ret_341; \ + int16x4_t __s0_341 = __p0_341; \ + int16x8_t __s2_341 = __p2_341; \ + __ret_341 = 
vset_lane_s16(vgetq_lane_s16(__s2_341, __p3_341), __s0_341, __p1_341); \ + __ret_341; \ }) #else -#define vcopy_laneq_s16(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \ - int16x4_t __ret_442; \ - int16x4_t __s0_442 = __p0_442; \ - int16x8_t __s2_442 = __p2_442; \ - int16x4_t __rev0_442; __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 3, 2, 1, 0); \ - int16x8_t __rev2_442; __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_442 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_442, __p3_442), __rev0_442, __p1_442); \ - __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \ - __ret_442; \ +#define vcopy_laneq_s16(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \ + int16x4_t __ret_342; \ + int16x4_t __s0_342 = __p0_342; \ + int16x8_t __s2_342 = __p2_342; \ + int16x4_t __rev0_342; __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 3, 2, 1, 0); \ + int16x8_t __rev2_342; __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_342 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_342, __p3_342), __rev0_342, __p1_342); \ + __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 3, 2, 1, 0); \ + __ret_342; \ }) #endif @@ -51468,6 +45429,27 @@ __ai int32_t vcvtas_s32_f32(float32_t __p0) { __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); + return __ret; +} __ai int64_t vcvtad_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); @@ -51478,6 +45460,27 @@ __ai uint32_t vcvtas_u32_f32(float32_t __p0) { __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); + return __ret; +} __ai uint64_t vcvtad_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); @@ -51488,6 +45491,27 @@ __ai int32_t vcvtms_s32_f32(float32_t __p0) { __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); + return __ret; +} __ai int64_t vcvtmd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); @@ -51498,6 +45522,27 @@ __ai uint32_t vcvtms_u32_f32(float32_t __p0) { __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); + return __ret; +} __ai uint64_t vcvtmd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); @@ -51508,6 +45553,27 @@ __ai int32_t vcvtns_s32_f32(float32_t __p0) { __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); + return __ret; +} __ai int64_t vcvtnd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); @@ -51518,6 +45584,27 @@ __ai uint32_t vcvtns_u32_f32(float32_t __p0) { __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); + return __ret; +} __ai uint64_t vcvtnd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); @@ -51528,6 +45615,27 @@ __ai int32_t vcvtps_s32_f32(float32_t __p0) { __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); + 
return __ret; +} +#else +__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); + return __ret; +} __ai int64_t vcvtpd_s64_f64(float64_t __p0) { int64_t __ret; __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); @@ -51538,6 +45646,27 @@ __ai uint32_t vcvtps_u32_f32(float32_t __p0) { __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); + return __ret; +} __ai uint64_t vcvtpd_u64_f64(float64_t __p0) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); @@ -51813,88 +45942,52 @@ __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { }) #endif -#define vdup_lane_p64(__p0_443, __p1_443) __extension__ ({ \ - poly64x1_t __ret_443; \ - poly64x1_t __s0_443 = __p0_443; \ - __ret_443 = splat_lane_p64(__s0_443, __p1_443); \ - __ret_443; \ +#define vdup_lane_p64(__p0_343, __p1_343) __extension__ ({ \ + poly64x1_t __ret_343; \ + poly64x1_t __s0_343 = __p0_343; \ + __ret_343 = splat_lane_p64(__s0_343, __p1_343); \ + __ret_343; \ }) #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p64(__p0_444, __p1_444) __extension__ ({ \ - poly64x2_t __ret_444; \ - poly64x1_t __s0_444 = __p0_444; \ - __ret_444 = splatq_lane_p64(__s0_444, __p1_444); \ - __ret_444; \ +#define vdupq_lane_p64(__p0_344, __p1_344) __extension__ ({ \ + poly64x2_t __ret_344; \ + poly64x1_t __s0_344 = __p0_344; \ + __ret_344 = splatq_lane_p64(__s0_344, __p1_344); \ + __ret_344; \ }) #else -#define vdupq_lane_p64(__p0_445, __p1_445) __extension__ ({ \ - poly64x2_t __ret_445; \ - poly64x1_t __s0_445 = __p0_445; \ - __ret_445 = __noswap_splatq_lane_p64(__s0_445, __p1_445); \ - __ret_445 = __builtin_shufflevector(__ret_445, __ret_445, 1, 0); \ - __ret_445; \ +#define vdupq_lane_p64(__p0_345, __p1_345) __extension__ ({ \ + poly64x2_t __ret_345; \ + poly64x1_t __s0_345 = __p0_345; \ + __ret_345 = __noswap_splatq_lane_p64(__s0_345, __p1_345); \ + __ret_345 = __builtin_shufflevector(__ret_345, __ret_345, 1, 0); \ + __ret_345; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f64(__p0_446, __p1_446) __extension__ ({ \ - float64x2_t __ret_446; \ - float64x1_t __s0_446 = __p0_446; \ - __ret_446 = splatq_lane_f64(__s0_446, __p1_446); \ - __ret_446; \ +#define vdupq_lane_f64(__p0_346, __p1_346) __extension__ ({ \ + float64x2_t __ret_346; \ + float64x1_t __s0_346 = __p0_346; \ + __ret_346 = splatq_lane_f64(__s0_346, __p1_346); \ + __ret_346; \ }) #else -#define vdupq_lane_f64(__p0_447, __p1_447) __extension__ ({ \ - float64x2_t __ret_447; \ - float64x1_t __s0_447 = __p0_447; \ - __ret_447 = 
__noswap_splatq_lane_f64(__s0_447, __p1_447); \ - __ret_447 = __builtin_shufflevector(__ret_447, __ret_447, 1, 0); \ - __ret_447; \ +#define vdupq_lane_f64(__p0_347, __p1_347) __extension__ ({ \ + float64x2_t __ret_347; \ + float64x1_t __s0_347 = __p0_347; \ + __ret_347 = __noswap_splatq_lane_f64(__s0_347, __p1_347); \ + __ret_347 = __builtin_shufflevector(__ret_347, __ret_347, 1, 0); \ + __ret_347; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f16(__p0_448, __p1_448) __extension__ ({ \ - float16x8_t __ret_448; \ - float16x4_t __s0_448 = __p0_448; \ - __ret_448 = splatq_lane_f16(__s0_448, __p1_448); \ - __ret_448; \ +#define vdup_lane_f64(__p0_348, __p1_348) __extension__ ({ \ + float64x1_t __ret_348; \ + float64x1_t __s0_348 = __p0_348; \ + __ret_348 = splat_lane_f64(__s0_348, __p1_348); \ + __ret_348; \ }) -#else -#define vdupq_lane_f16(__p0_449, __p1_449) __extension__ ({ \ - float16x8_t __ret_449; \ - float16x4_t __s0_449 = __p0_449; \ - float16x4_t __rev0_449; __rev0_449 = __builtin_shufflevector(__s0_449, __s0_449, 3, 2, 1, 0); \ - __ret_449 = __noswap_splatq_lane_f16(__rev0_449, __p1_449); \ - __ret_449 = __builtin_shufflevector(__ret_449, __ret_449, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_449; \ -}) -#endif - -#define vdup_lane_f64(__p0_450, __p1_450) __extension__ ({ \ - float64x1_t __ret_450; \ - float64x1_t __s0_450 = __p0_450; \ - __ret_450 = splat_lane_f64(__s0_450, __p1_450); \ - __ret_450; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f16(__p0_451, __p1_451) __extension__ ({ \ - float16x4_t __ret_451; \ - float16x4_t __s0_451 = __p0_451; \ - __ret_451 = splat_lane_f16(__s0_451, __p1_451); \ - __ret_451; \ -}) -#else -#define vdup_lane_f16(__p0_452, __p1_452) __extension__ ({ \ - float16x4_t __ret_452; \ - float16x4_t __s0_452 = __p0_452; \ - float16x4_t __rev0_452; __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 3, 2, 1, 0); \ - __ret_452 = __noswap_splat_lane_f16(__rev0_452, __p1_452); \ - __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 3, 2, 1, 0); \ - __ret_452; \ -}) -#endif - #ifdef __LITTLE_ENDIAN__ #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ poly8_t __ret; \ @@ -52100,502 +46193,502 @@ __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p8(__p0_453, __p1_453) __extension__ ({ \ - poly8x8_t __ret_453; \ - poly8x16_t __s0_453 = __p0_453; \ - __ret_453 = splat_laneq_p8(__s0_453, __p1_453); \ - __ret_453; \ +#define vdup_laneq_p8(__p0_349, __p1_349) __extension__ ({ \ + poly8x8_t __ret_349; \ + poly8x16_t __s0_349 = __p0_349; \ + __ret_349 = splat_laneq_p8(__s0_349, __p1_349); \ + __ret_349; \ }) #else -#define vdup_laneq_p8(__p0_454, __p1_454) __extension__ ({ \ - poly8x8_t __ret_454; \ - poly8x16_t __s0_454 = __p0_454; \ - poly8x16_t __rev0_454; __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_454 = __noswap_splat_laneq_p8(__rev0_454, __p1_454); \ - __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_454; \ +#define vdup_laneq_p8(__p0_350, __p1_350) __extension__ ({ \ + poly8x8_t __ret_350; \ + poly8x16_t __s0_350 = __p0_350; \ + poly8x16_t __rev0_350; __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_350 = __noswap_splat_laneq_p8(__rev0_350, __p1_350); \ + __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_350; \ }) #endif #ifdef 
__LITTLE_ENDIAN__ -#define vdup_laneq_p64(__p0_455, __p1_455) __extension__ ({ \ - poly64x1_t __ret_455; \ - poly64x2_t __s0_455 = __p0_455; \ - __ret_455 = splat_laneq_p64(__s0_455, __p1_455); \ - __ret_455; \ +#define vdup_laneq_p64(__p0_351, __p1_351) __extension__ ({ \ + poly64x1_t __ret_351; \ + poly64x2_t __s0_351 = __p0_351; \ + __ret_351 = splat_laneq_p64(__s0_351, __p1_351); \ + __ret_351; \ }) #else -#define vdup_laneq_p64(__p0_456, __p1_456) __extension__ ({ \ - poly64x1_t __ret_456; \ - poly64x2_t __s0_456 = __p0_456; \ - poly64x2_t __rev0_456; __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \ - __ret_456 = __noswap_splat_laneq_p64(__rev0_456, __p1_456); \ - __ret_456; \ +#define vdup_laneq_p64(__p0_352, __p1_352) __extension__ ({ \ + poly64x1_t __ret_352; \ + poly64x2_t __s0_352 = __p0_352; \ + poly64x2_t __rev0_352; __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 1, 0); \ + __ret_352 = __noswap_splat_laneq_p64(__rev0_352, __p1_352); \ + __ret_352; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p16(__p0_457, __p1_457) __extension__ ({ \ - poly16x4_t __ret_457; \ - poly16x8_t __s0_457 = __p0_457; \ - __ret_457 = splat_laneq_p16(__s0_457, __p1_457); \ - __ret_457; \ +#define vdup_laneq_p16(__p0_353, __p1_353) __extension__ ({ \ + poly16x4_t __ret_353; \ + poly16x8_t __s0_353 = __p0_353; \ + __ret_353 = splat_laneq_p16(__s0_353, __p1_353); \ + __ret_353; \ }) #else -#define vdup_laneq_p16(__p0_458, __p1_458) __extension__ ({ \ - poly16x4_t __ret_458; \ - poly16x8_t __s0_458 = __p0_458; \ - poly16x8_t __rev0_458; __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_458 = __noswap_splat_laneq_p16(__rev0_458, __p1_458); \ - __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \ - __ret_458; \ +#define vdup_laneq_p16(__p0_354, __p1_354) __extension__ ({ \ + poly16x4_t __ret_354; \ + poly16x8_t __s0_354 = __p0_354; \ + poly16x8_t __rev0_354; __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_354 = __noswap_splat_laneq_p16(__rev0_354, __p1_354); \ + __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 3, 2, 1, 0); \ + __ret_354; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p8(__p0_459, __p1_459) __extension__ ({ \ - poly8x16_t __ret_459; \ - poly8x16_t __s0_459 = __p0_459; \ - __ret_459 = splatq_laneq_p8(__s0_459, __p1_459); \ - __ret_459; \ +#define vdupq_laneq_p8(__p0_355, __p1_355) __extension__ ({ \ + poly8x16_t __ret_355; \ + poly8x16_t __s0_355 = __p0_355; \ + __ret_355 = splatq_laneq_p8(__s0_355, __p1_355); \ + __ret_355; \ }) #else -#define vdupq_laneq_p8(__p0_460, __p1_460) __extension__ ({ \ - poly8x16_t __ret_460; \ - poly8x16_t __s0_460 = __p0_460; \ - poly8x16_t __rev0_460; __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_460 = __noswap_splatq_laneq_p8(__rev0_460, __p1_460); \ - __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_460; \ +#define vdupq_laneq_p8(__p0_356, __p1_356) __extension__ ({ \ + poly8x16_t __ret_356; \ + poly8x16_t __s0_356 = __p0_356; \ + poly8x16_t __rev0_356; __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_356 = __noswap_splatq_laneq_p8(__rev0_356, __p1_356); \ + __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__ret_356; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p64(__p0_461, __p1_461) __extension__ ({ \ - poly64x2_t __ret_461; \ - poly64x2_t __s0_461 = __p0_461; \ - __ret_461 = splatq_laneq_p64(__s0_461, __p1_461); \ - __ret_461; \ +#define vdupq_laneq_p64(__p0_357, __p1_357) __extension__ ({ \ + poly64x2_t __ret_357; \ + poly64x2_t __s0_357 = __p0_357; \ + __ret_357 = splatq_laneq_p64(__s0_357, __p1_357); \ + __ret_357; \ }) #else -#define vdupq_laneq_p64(__p0_462, __p1_462) __extension__ ({ \ - poly64x2_t __ret_462; \ - poly64x2_t __s0_462 = __p0_462; \ - poly64x2_t __rev0_462; __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 1, 0); \ - __ret_462 = __noswap_splatq_laneq_p64(__rev0_462, __p1_462); \ - __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 1, 0); \ - __ret_462; \ +#define vdupq_laneq_p64(__p0_358, __p1_358) __extension__ ({ \ + poly64x2_t __ret_358; \ + poly64x2_t __s0_358 = __p0_358; \ + poly64x2_t __rev0_358; __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 1, 0); \ + __ret_358 = __noswap_splatq_laneq_p64(__rev0_358, __p1_358); \ + __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 1, 0); \ + __ret_358; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p16(__p0_463, __p1_463) __extension__ ({ \ - poly16x8_t __ret_463; \ - poly16x8_t __s0_463 = __p0_463; \ - __ret_463 = splatq_laneq_p16(__s0_463, __p1_463); \ - __ret_463; \ +#define vdupq_laneq_p16(__p0_359, __p1_359) __extension__ ({ \ + poly16x8_t __ret_359; \ + poly16x8_t __s0_359 = __p0_359; \ + __ret_359 = splatq_laneq_p16(__s0_359, __p1_359); \ + __ret_359; \ }) #else -#define vdupq_laneq_p16(__p0_464, __p1_464) __extension__ ({ \ - poly16x8_t __ret_464; \ - poly16x8_t __s0_464 = __p0_464; \ - poly16x8_t __rev0_464; __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_464 = __noswap_splatq_laneq_p16(__rev0_464, __p1_464); \ - __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_464; \ +#define vdupq_laneq_p16(__p0_360, __p1_360) __extension__ ({ \ + poly16x8_t __ret_360; \ + poly16x8_t __s0_360 = __p0_360; \ + poly16x8_t __rev0_360; __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_360 = __noswap_splatq_laneq_p16(__rev0_360, __p1_360); \ + __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_360; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u8(__p0_465, __p1_465) __extension__ ({ \ - uint8x16_t __ret_465; \ - uint8x16_t __s0_465 = __p0_465; \ - __ret_465 = splatq_laneq_u8(__s0_465, __p1_465); \ - __ret_465; \ +#define vdupq_laneq_u8(__p0_361, __p1_361) __extension__ ({ \ + uint8x16_t __ret_361; \ + uint8x16_t __s0_361 = __p0_361; \ + __ret_361 = splatq_laneq_u8(__s0_361, __p1_361); \ + __ret_361; \ }) #else -#define vdupq_laneq_u8(__p0_466, __p1_466) __extension__ ({ \ - uint8x16_t __ret_466; \ - uint8x16_t __s0_466 = __p0_466; \ - uint8x16_t __rev0_466; __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_466 = __noswap_splatq_laneq_u8(__rev0_466, __p1_466); \ - __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_466; \ +#define vdupq_laneq_u8(__p0_362, __p1_362) __extension__ ({ \ + uint8x16_t __ret_362; \ + uint8x16_t __s0_362 = __p0_362; \ + uint8x16_t __rev0_362; __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); \ + __ret_362 = __noswap_splatq_laneq_u8(__rev0_362, __p1_362); \ + __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_362; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u32(__p0_467, __p1_467) __extension__ ({ \ - uint32x4_t __ret_467; \ - uint32x4_t __s0_467 = __p0_467; \ - __ret_467 = splatq_laneq_u32(__s0_467, __p1_467); \ - __ret_467; \ +#define vdupq_laneq_u32(__p0_363, __p1_363) __extension__ ({ \ + uint32x4_t __ret_363; \ + uint32x4_t __s0_363 = __p0_363; \ + __ret_363 = splatq_laneq_u32(__s0_363, __p1_363); \ + __ret_363; \ }) #else -#define vdupq_laneq_u32(__p0_468, __p1_468) __extension__ ({ \ - uint32x4_t __ret_468; \ - uint32x4_t __s0_468 = __p0_468; \ - uint32x4_t __rev0_468; __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 3, 2, 1, 0); \ - __ret_468 = __noswap_splatq_laneq_u32(__rev0_468, __p1_468); \ - __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 3, 2, 1, 0); \ - __ret_468; \ +#define vdupq_laneq_u32(__p0_364, __p1_364) __extension__ ({ \ + uint32x4_t __ret_364; \ + uint32x4_t __s0_364 = __p0_364; \ + uint32x4_t __rev0_364; __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \ + __ret_364 = __noswap_splatq_laneq_u32(__rev0_364, __p1_364); \ + __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \ + __ret_364; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u64(__p0_469, __p1_469) __extension__ ({ \ - uint64x2_t __ret_469; \ - uint64x2_t __s0_469 = __p0_469; \ - __ret_469 = splatq_laneq_u64(__s0_469, __p1_469); \ - __ret_469; \ +#define vdupq_laneq_u64(__p0_365, __p1_365) __extension__ ({ \ + uint64x2_t __ret_365; \ + uint64x2_t __s0_365 = __p0_365; \ + __ret_365 = splatq_laneq_u64(__s0_365, __p1_365); \ + __ret_365; \ }) #else -#define vdupq_laneq_u64(__p0_470, __p1_470) __extension__ ({ \ - uint64x2_t __ret_470; \ - uint64x2_t __s0_470 = __p0_470; \ - uint64x2_t __rev0_470; __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 1, 0); \ - __ret_470 = __noswap_splatq_laneq_u64(__rev0_470, __p1_470); \ - __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 1, 0); \ - __ret_470; \ +#define vdupq_laneq_u64(__p0_366, __p1_366) __extension__ ({ \ + uint64x2_t __ret_366; \ + uint64x2_t __s0_366 = __p0_366; \ + uint64x2_t __rev0_366; __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \ + __ret_366 = __noswap_splatq_laneq_u64(__rev0_366, __p1_366); \ + __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \ + __ret_366; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u16(__p0_471, __p1_471) __extension__ ({ \ - uint16x8_t __ret_471; \ - uint16x8_t __s0_471 = __p0_471; \ - __ret_471 = splatq_laneq_u16(__s0_471, __p1_471); \ - __ret_471; \ +#define vdupq_laneq_u16(__p0_367, __p1_367) __extension__ ({ \ + uint16x8_t __ret_367; \ + uint16x8_t __s0_367 = __p0_367; \ + __ret_367 = splatq_laneq_u16(__s0_367, __p1_367); \ + __ret_367; \ }) #else -#define vdupq_laneq_u16(__p0_472, __p1_472) __extension__ ({ \ - uint16x8_t __ret_472; \ - uint16x8_t __s0_472 = __p0_472; \ - uint16x8_t __rev0_472; __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_472 = __noswap_splatq_laneq_u16(__rev0_472, __p1_472); \ - __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_472; \ +#define vdupq_laneq_u16(__p0_368, __p1_368) __extension__ ({ \ + uint16x8_t __ret_368; \ + uint16x8_t __s0_368 = __p0_368; \ + uint16x8_t 
__rev0_368; __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_368 = __noswap_splatq_laneq_u16(__rev0_368, __p1_368); \ + __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_368; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s8(__p0_473, __p1_473) __extension__ ({ \ - int8x16_t __ret_473; \ - int8x16_t __s0_473 = __p0_473; \ - __ret_473 = splatq_laneq_s8(__s0_473, __p1_473); \ - __ret_473; \ +#define vdupq_laneq_s8(__p0_369, __p1_369) __extension__ ({ \ + int8x16_t __ret_369; \ + int8x16_t __s0_369 = __p0_369; \ + __ret_369 = splatq_laneq_s8(__s0_369, __p1_369); \ + __ret_369; \ }) #else -#define vdupq_laneq_s8(__p0_474, __p1_474) __extension__ ({ \ - int8x16_t __ret_474; \ - int8x16_t __s0_474 = __p0_474; \ - int8x16_t __rev0_474; __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_474 = __noswap_splatq_laneq_s8(__rev0_474, __p1_474); \ - __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_474; \ +#define vdupq_laneq_s8(__p0_370, __p1_370) __extension__ ({ \ + int8x16_t __ret_370; \ + int8x16_t __s0_370 = __p0_370; \ + int8x16_t __rev0_370; __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_370 = __noswap_splatq_laneq_s8(__rev0_370, __p1_370); \ + __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_370; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f64(__p0_475, __p1_475) __extension__ ({ \ - float64x2_t __ret_475; \ - float64x2_t __s0_475 = __p0_475; \ - __ret_475 = splatq_laneq_f64(__s0_475, __p1_475); \ - __ret_475; \ +#define vdupq_laneq_f64(__p0_371, __p1_371) __extension__ ({ \ + float64x2_t __ret_371; \ + float64x2_t __s0_371 = __p0_371; \ + __ret_371 = splatq_laneq_f64(__s0_371, __p1_371); \ + __ret_371; \ }) #else -#define vdupq_laneq_f64(__p0_476, __p1_476) __extension__ ({ \ - float64x2_t __ret_476; \ - float64x2_t __s0_476 = __p0_476; \ - float64x2_t __rev0_476; __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 1, 0); \ - __ret_476 = __noswap_splatq_laneq_f64(__rev0_476, __p1_476); \ - __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 1, 0); \ - __ret_476; \ +#define vdupq_laneq_f64(__p0_372, __p1_372) __extension__ ({ \ + float64x2_t __ret_372; \ + float64x2_t __s0_372 = __p0_372; \ + float64x2_t __rev0_372; __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 1, 0); \ + __ret_372 = __noswap_splatq_laneq_f64(__rev0_372, __p1_372); \ + __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 1, 0); \ + __ret_372; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f32(__p0_477, __p1_477) __extension__ ({ \ - float32x4_t __ret_477; \ - float32x4_t __s0_477 = __p0_477; \ - __ret_477 = splatq_laneq_f32(__s0_477, __p1_477); \ - __ret_477; \ +#define vdupq_laneq_f32(__p0_373, __p1_373) __extension__ ({ \ + float32x4_t __ret_373; \ + float32x4_t __s0_373 = __p0_373; \ + __ret_373 = splatq_laneq_f32(__s0_373, __p1_373); \ + __ret_373; \ }) #else -#define vdupq_laneq_f32(__p0_478, __p1_478) __extension__ ({ \ - float32x4_t __ret_478; \ - float32x4_t __s0_478 = __p0_478; \ - float32x4_t __rev0_478; __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \ - __ret_478 = __noswap_splatq_laneq_f32(__rev0_478, __p1_478); \ - __ret_478 = 
__builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \ - __ret_478; \ +#define vdupq_laneq_f32(__p0_374, __p1_374) __extension__ ({ \ + float32x4_t __ret_374; \ + float32x4_t __s0_374 = __p0_374; \ + float32x4_t __rev0_374; __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \ + __ret_374 = __noswap_splatq_laneq_f32(__rev0_374, __p1_374); \ + __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \ + __ret_374; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f16(__p0_479, __p1_479) __extension__ ({ \ - float16x8_t __ret_479; \ - float16x8_t __s0_479 = __p0_479; \ - __ret_479 = splatq_laneq_f16(__s0_479, __p1_479); \ - __ret_479; \ +#define vdupq_laneq_f16(__p0_375, __p1_375) __extension__ ({ \ + float16x8_t __ret_375; \ + float16x8_t __s0_375 = __p0_375; \ + __ret_375 = splatq_laneq_f16(__s0_375, __p1_375); \ + __ret_375; \ }) #else -#define vdupq_laneq_f16(__p0_480, __p1_480) __extension__ ({ \ - float16x8_t __ret_480; \ - float16x8_t __s0_480 = __p0_480; \ - float16x8_t __rev0_480; __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_480 = __noswap_splatq_laneq_f16(__rev0_480, __p1_480); \ - __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_480; \ +#define vdupq_laneq_f16(__p0_376, __p1_376) __extension__ ({ \ + float16x8_t __ret_376; \ + float16x8_t __s0_376 = __p0_376; \ + float16x8_t __rev0_376; __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_376 = __noswap_splatq_laneq_f16(__rev0_376, __p1_376); \ + __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_376; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s32(__p0_481, __p1_481) __extension__ ({ \ - int32x4_t __ret_481; \ - int32x4_t __s0_481 = __p0_481; \ - __ret_481 = splatq_laneq_s32(__s0_481, __p1_481); \ - __ret_481; \ +#define vdupq_laneq_s32(__p0_377, __p1_377) __extension__ ({ \ + int32x4_t __ret_377; \ + int32x4_t __s0_377 = __p0_377; \ + __ret_377 = splatq_laneq_s32(__s0_377, __p1_377); \ + __ret_377; \ }) #else -#define vdupq_laneq_s32(__p0_482, __p1_482) __extension__ ({ \ - int32x4_t __ret_482; \ - int32x4_t __s0_482 = __p0_482; \ - int32x4_t __rev0_482; __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 3, 2, 1, 0); \ - __ret_482 = __noswap_splatq_laneq_s32(__rev0_482, __p1_482); \ - __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 3, 2, 1, 0); \ - __ret_482; \ +#define vdupq_laneq_s32(__p0_378, __p1_378) __extension__ ({ \ + int32x4_t __ret_378; \ + int32x4_t __s0_378 = __p0_378; \ + int32x4_t __rev0_378; __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 3, 2, 1, 0); \ + __ret_378 = __noswap_splatq_laneq_s32(__rev0_378, __p1_378); \ + __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 3, 2, 1, 0); \ + __ret_378; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s64(__p0_483, __p1_483) __extension__ ({ \ - int64x2_t __ret_483; \ - int64x2_t __s0_483 = __p0_483; \ - __ret_483 = splatq_laneq_s64(__s0_483, __p1_483); \ - __ret_483; \ +#define vdupq_laneq_s64(__p0_379, __p1_379) __extension__ ({ \ + int64x2_t __ret_379; \ + int64x2_t __s0_379 = __p0_379; \ + __ret_379 = splatq_laneq_s64(__s0_379, __p1_379); \ + __ret_379; \ }) #else -#define vdupq_laneq_s64(__p0_484, __p1_484) __extension__ ({ \ - int64x2_t __ret_484; \ - int64x2_t __s0_484 = __p0_484; \ - int64x2_t __rev0_484; __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 1, 0); \ - __ret_484 = 
__noswap_splatq_laneq_s64(__rev0_484, __p1_484); \ - __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \ - __ret_484; \ +#define vdupq_laneq_s64(__p0_380, __p1_380) __extension__ ({ \ + int64x2_t __ret_380; \ + int64x2_t __s0_380 = __p0_380; \ + int64x2_t __rev0_380; __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 1, 0); \ + __ret_380 = __noswap_splatq_laneq_s64(__rev0_380, __p1_380); \ + __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 1, 0); \ + __ret_380; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s16(__p0_485, __p1_485) __extension__ ({ \ - int16x8_t __ret_485; \ - int16x8_t __s0_485 = __p0_485; \ - __ret_485 = splatq_laneq_s16(__s0_485, __p1_485); \ - __ret_485; \ +#define vdupq_laneq_s16(__p0_381, __p1_381) __extension__ ({ \ + int16x8_t __ret_381; \ + int16x8_t __s0_381 = __p0_381; \ + __ret_381 = splatq_laneq_s16(__s0_381, __p1_381); \ + __ret_381; \ }) #else -#define vdupq_laneq_s16(__p0_486, __p1_486) __extension__ ({ \ - int16x8_t __ret_486; \ - int16x8_t __s0_486 = __p0_486; \ - int16x8_t __rev0_486; __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_486 = __noswap_splatq_laneq_s16(__rev0_486, __p1_486); \ - __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_486; \ +#define vdupq_laneq_s16(__p0_382, __p1_382) __extension__ ({ \ + int16x8_t __ret_382; \ + int16x8_t __s0_382 = __p0_382; \ + int16x8_t __rev0_382; __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_382 = __noswap_splatq_laneq_s16(__rev0_382, __p1_382); \ + __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_382; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u8(__p0_487, __p1_487) __extension__ ({ \ - uint8x8_t __ret_487; \ - uint8x16_t __s0_487 = __p0_487; \ - __ret_487 = splat_laneq_u8(__s0_487, __p1_487); \ - __ret_487; \ +#define vdup_laneq_u8(__p0_383, __p1_383) __extension__ ({ \ + uint8x8_t __ret_383; \ + uint8x16_t __s0_383 = __p0_383; \ + __ret_383 = splat_laneq_u8(__s0_383, __p1_383); \ + __ret_383; \ }) #else -#define vdup_laneq_u8(__p0_488, __p1_488) __extension__ ({ \ - uint8x8_t __ret_488; \ - uint8x16_t __s0_488 = __p0_488; \ - uint8x16_t __rev0_488; __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_488 = __noswap_splat_laneq_u8(__rev0_488, __p1_488); \ - __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_488; \ +#define vdup_laneq_u8(__p0_384, __p1_384) __extension__ ({ \ + uint8x8_t __ret_384; \ + uint8x16_t __s0_384 = __p0_384; \ + uint8x16_t __rev0_384; __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_384 = __noswap_splat_laneq_u8(__rev0_384, __p1_384); \ + __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_384; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u32(__p0_489, __p1_489) __extension__ ({ \ - uint32x2_t __ret_489; \ - uint32x4_t __s0_489 = __p0_489; \ - __ret_489 = splat_laneq_u32(__s0_489, __p1_489); \ - __ret_489; \ +#define vdup_laneq_u32(__p0_385, __p1_385) __extension__ ({ \ + uint32x2_t __ret_385; \ + uint32x4_t __s0_385 = __p0_385; \ + __ret_385 = splat_laneq_u32(__s0_385, __p1_385); \ + __ret_385; \ }) #else -#define vdup_laneq_u32(__p0_490, __p1_490) __extension__ ({ \ - uint32x2_t __ret_490; \ - uint32x4_t __s0_490 = 
__p0_490; \ - uint32x4_t __rev0_490; __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \ - __ret_490 = __noswap_splat_laneq_u32(__rev0_490, __p1_490); \ - __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 1, 0); \ - __ret_490; \ +#define vdup_laneq_u32(__p0_386, __p1_386) __extension__ ({ \ + uint32x2_t __ret_386; \ + uint32x4_t __s0_386 = __p0_386; \ + uint32x4_t __rev0_386; __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 3, 2, 1, 0); \ + __ret_386 = __noswap_splat_laneq_u32(__rev0_386, __p1_386); \ + __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \ + __ret_386; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u64(__p0_491, __p1_491) __extension__ ({ \ - uint64x1_t __ret_491; \ - uint64x2_t __s0_491 = __p0_491; \ - __ret_491 = splat_laneq_u64(__s0_491, __p1_491); \ - __ret_491; \ +#define vdup_laneq_u64(__p0_387, __p1_387) __extension__ ({ \ + uint64x1_t __ret_387; \ + uint64x2_t __s0_387 = __p0_387; \ + __ret_387 = splat_laneq_u64(__s0_387, __p1_387); \ + __ret_387; \ }) #else -#define vdup_laneq_u64(__p0_492, __p1_492) __extension__ ({ \ - uint64x1_t __ret_492; \ - uint64x2_t __s0_492 = __p0_492; \ - uint64x2_t __rev0_492; __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \ - __ret_492 = __noswap_splat_laneq_u64(__rev0_492, __p1_492); \ - __ret_492; \ +#define vdup_laneq_u64(__p0_388, __p1_388) __extension__ ({ \ + uint64x1_t __ret_388; \ + uint64x2_t __s0_388 = __p0_388; \ + uint64x2_t __rev0_388; __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 1, 0); \ + __ret_388 = __noswap_splat_laneq_u64(__rev0_388, __p1_388); \ + __ret_388; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u16(__p0_493, __p1_493) __extension__ ({ \ - uint16x4_t __ret_493; \ - uint16x8_t __s0_493 = __p0_493; \ - __ret_493 = splat_laneq_u16(__s0_493, __p1_493); \ - __ret_493; \ +#define vdup_laneq_u16(__p0_389, __p1_389) __extension__ ({ \ + uint16x4_t __ret_389; \ + uint16x8_t __s0_389 = __p0_389; \ + __ret_389 = splat_laneq_u16(__s0_389, __p1_389); \ + __ret_389; \ }) #else -#define vdup_laneq_u16(__p0_494, __p1_494) __extension__ ({ \ - uint16x4_t __ret_494; \ - uint16x8_t __s0_494 = __p0_494; \ - uint16x8_t __rev0_494; __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_494 = __noswap_splat_laneq_u16(__rev0_494, __p1_494); \ - __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \ - __ret_494; \ +#define vdup_laneq_u16(__p0_390, __p1_390) __extension__ ({ \ + uint16x4_t __ret_390; \ + uint16x8_t __s0_390 = __p0_390; \ + uint16x8_t __rev0_390; __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_390 = __noswap_splat_laneq_u16(__rev0_390, __p1_390); \ + __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \ + __ret_390; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s8(__p0_495, __p1_495) __extension__ ({ \ - int8x8_t __ret_495; \ - int8x16_t __s0_495 = __p0_495; \ - __ret_495 = splat_laneq_s8(__s0_495, __p1_495); \ - __ret_495; \ +#define vdup_laneq_s8(__p0_391, __p1_391) __extension__ ({ \ + int8x8_t __ret_391; \ + int8x16_t __s0_391 = __p0_391; \ + __ret_391 = splat_laneq_s8(__s0_391, __p1_391); \ + __ret_391; \ }) #else -#define vdup_laneq_s8(__p0_496, __p1_496) __extension__ ({ \ - int8x8_t __ret_496; \ - int8x16_t __s0_496 = __p0_496; \ - int8x16_t __rev0_496; __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_496 = 
__noswap_splat_laneq_s8(__rev0_496, __p1_496); \ - __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_496; \ +#define vdup_laneq_s8(__p0_392, __p1_392) __extension__ ({ \ + int8x8_t __ret_392; \ + int8x16_t __s0_392 = __p0_392; \ + int8x16_t __rev0_392; __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_392 = __noswap_splat_laneq_s8(__rev0_392, __p1_392); \ + __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_392; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f64(__p0_497, __p1_497) __extension__ ({ \ - float64x1_t __ret_497; \ - float64x2_t __s0_497 = __p0_497; \ - __ret_497 = splat_laneq_f64(__s0_497, __p1_497); \ - __ret_497; \ +#define vdup_laneq_f64(__p0_393, __p1_393) __extension__ ({ \ + float64x1_t __ret_393; \ + float64x2_t __s0_393 = __p0_393; \ + __ret_393 = splat_laneq_f64(__s0_393, __p1_393); \ + __ret_393; \ }) #else -#define vdup_laneq_f64(__p0_498, __p1_498) __extension__ ({ \ - float64x1_t __ret_498; \ - float64x2_t __s0_498 = __p0_498; \ - float64x2_t __rev0_498; __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 1, 0); \ - __ret_498 = __noswap_splat_laneq_f64(__rev0_498, __p1_498); \ - __ret_498; \ +#define vdup_laneq_f64(__p0_394, __p1_394) __extension__ ({ \ + float64x1_t __ret_394; \ + float64x2_t __s0_394 = __p0_394; \ + float64x2_t __rev0_394; __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 1, 0); \ + __ret_394 = __noswap_splat_laneq_f64(__rev0_394, __p1_394); \ + __ret_394; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f32(__p0_499, __p1_499) __extension__ ({ \ - float32x2_t __ret_499; \ - float32x4_t __s0_499 = __p0_499; \ - __ret_499 = splat_laneq_f32(__s0_499, __p1_499); \ - __ret_499; \ +#define vdup_laneq_f32(__p0_395, __p1_395) __extension__ ({ \ + float32x2_t __ret_395; \ + float32x4_t __s0_395 = __p0_395; \ + __ret_395 = splat_laneq_f32(__s0_395, __p1_395); \ + __ret_395; \ }) #else -#define vdup_laneq_f32(__p0_500, __p1_500) __extension__ ({ \ - float32x2_t __ret_500; \ - float32x4_t __s0_500 = __p0_500; \ - float32x4_t __rev0_500; __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 3, 2, 1, 0); \ - __ret_500 = __noswap_splat_laneq_f32(__rev0_500, __p1_500); \ - __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \ - __ret_500; \ +#define vdup_laneq_f32(__p0_396, __p1_396) __extension__ ({ \ + float32x2_t __ret_396; \ + float32x4_t __s0_396 = __p0_396; \ + float32x4_t __rev0_396; __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 3, 2, 1, 0); \ + __ret_396 = __noswap_splat_laneq_f32(__rev0_396, __p1_396); \ + __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 1, 0); \ + __ret_396; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f16(__p0_501, __p1_501) __extension__ ({ \ - float16x4_t __ret_501; \ - float16x8_t __s0_501 = __p0_501; \ - __ret_501 = splat_laneq_f16(__s0_501, __p1_501); \ - __ret_501; \ +#define vdup_laneq_f16(__p0_397, __p1_397) __extension__ ({ \ + float16x4_t __ret_397; \ + float16x8_t __s0_397 = __p0_397; \ + __ret_397 = splat_laneq_f16(__s0_397, __p1_397); \ + __ret_397; \ }) #else -#define vdup_laneq_f16(__p0_502, __p1_502) __extension__ ({ \ - float16x4_t __ret_502; \ - float16x8_t __s0_502 = __p0_502; \ - float16x8_t __rev0_502; __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_502 = __noswap_splat_laneq_f16(__rev0_502, __p1_502); \ - __ret_502 = 
__builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \ - __ret_502; \ +#define vdup_laneq_f16(__p0_398, __p1_398) __extension__ ({ \ + float16x4_t __ret_398; \ + float16x8_t __s0_398 = __p0_398; \ + float16x8_t __rev0_398; __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_398 = __noswap_splat_laneq_f16(__rev0_398, __p1_398); \ + __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \ + __ret_398; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s32(__p0_503, __p1_503) __extension__ ({ \ - int32x2_t __ret_503; \ - int32x4_t __s0_503 = __p0_503; \ - __ret_503 = splat_laneq_s32(__s0_503, __p1_503); \ - __ret_503; \ +#define vdup_laneq_s32(__p0_399, __p1_399) __extension__ ({ \ + int32x2_t __ret_399; \ + int32x4_t __s0_399 = __p0_399; \ + __ret_399 = splat_laneq_s32(__s0_399, __p1_399); \ + __ret_399; \ }) #else -#define vdup_laneq_s32(__p0_504, __p1_504) __extension__ ({ \ - int32x2_t __ret_504; \ - int32x4_t __s0_504 = __p0_504; \ - int32x4_t __rev0_504; __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 3, 2, 1, 0); \ - __ret_504 = __noswap_splat_laneq_s32(__rev0_504, __p1_504); \ - __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \ - __ret_504; \ +#define vdup_laneq_s32(__p0_400, __p1_400) __extension__ ({ \ + int32x2_t __ret_400; \ + int32x4_t __s0_400 = __p0_400; \ + int32x4_t __rev0_400; __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \ + __ret_400 = __noswap_splat_laneq_s32(__rev0_400, __p1_400); \ + __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 1, 0); \ + __ret_400; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s64(__p0_505, __p1_505) __extension__ ({ \ - int64x1_t __ret_505; \ - int64x2_t __s0_505 = __p0_505; \ - __ret_505 = splat_laneq_s64(__s0_505, __p1_505); \ - __ret_505; \ +#define vdup_laneq_s64(__p0_401, __p1_401) __extension__ ({ \ + int64x1_t __ret_401; \ + int64x2_t __s0_401 = __p0_401; \ + __ret_401 = splat_laneq_s64(__s0_401, __p1_401); \ + __ret_401; \ }) #else -#define vdup_laneq_s64(__p0_506, __p1_506) __extension__ ({ \ - int64x1_t __ret_506; \ - int64x2_t __s0_506 = __p0_506; \ - int64x2_t __rev0_506; __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 1, 0); \ - __ret_506 = __noswap_splat_laneq_s64(__rev0_506, __p1_506); \ - __ret_506; \ +#define vdup_laneq_s64(__p0_402, __p1_402) __extension__ ({ \ + int64x1_t __ret_402; \ + int64x2_t __s0_402 = __p0_402; \ + int64x2_t __rev0_402; __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \ + __ret_402 = __noswap_splat_laneq_s64(__rev0_402, __p1_402); \ + __ret_402; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s16(__p0_507, __p1_507) __extension__ ({ \ - int16x4_t __ret_507; \ - int16x8_t __s0_507 = __p0_507; \ - __ret_507 = splat_laneq_s16(__s0_507, __p1_507); \ - __ret_507; \ +#define vdup_laneq_s16(__p0_403, __p1_403) __extension__ ({ \ + int16x4_t __ret_403; \ + int16x8_t __s0_403 = __p0_403; \ + __ret_403 = splat_laneq_s16(__s0_403, __p1_403); \ + __ret_403; \ }) #else -#define vdup_laneq_s16(__p0_508, __p1_508) __extension__ ({ \ - int16x4_t __ret_508; \ - int16x8_t __s0_508 = __p0_508; \ - int16x8_t __rev0_508; __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_508 = __noswap_splat_laneq_s16(__rev0_508, __p1_508); \ - __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 3, 2, 1, 0); \ - __ret_508; \ +#define vdup_laneq_s16(__p0_404, __p1_404) __extension__ ({ \ + int16x4_t __ret_404; \ + int16x8_t 
__s0_404 = __p0_404; \ + int16x8_t __rev0_404; __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_404 = __noswap_splat_laneq_s16(__rev0_404, __p1_404); \ + __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 3, 2, 1, 0); \ + __ret_404; \ }) #endif @@ -53091,246 +47184,246 @@ __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) __ret = vfma_f64(__p0, -__p1, __p2); return __ret; } -#define vfmsd_lane_f64(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \ - float64_t __ret_509; \ - float64_t __s0_509 = __p0_509; \ - float64_t __s1_509 = __p1_509; \ - float64x1_t __s2_509 = __p2_509; \ - __ret_509 = vfmad_lane_f64(__s0_509, -__s1_509, __s2_509, __p3_509); \ - __ret_509; \ +#define vfmsd_lane_f64(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \ + float64_t __ret_405; \ + float64_t __s0_405 = __p0_405; \ + float64_t __s1_405 = __p1_405; \ + float64x1_t __s2_405 = __p2_405; \ + __ret_405 = vfmad_lane_f64(__s0_405, -__s1_405, __s2_405, __p3_405); \ + __ret_405; \ }) #ifdef __LITTLE_ENDIAN__ -#define vfmss_lane_f32(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \ - float32_t __ret_510; \ - float32_t __s0_510 = __p0_510; \ - float32_t __s1_510 = __p1_510; \ - float32x2_t __s2_510 = __p2_510; \ - __ret_510 = vfmas_lane_f32(__s0_510, -__s1_510, __s2_510, __p3_510); \ - __ret_510; \ +#define vfmss_lane_f32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \ + float32_t __ret_406; \ + float32_t __s0_406 = __p0_406; \ + float32_t __s1_406 = __p1_406; \ + float32x2_t __s2_406 = __p2_406; \ + __ret_406 = vfmas_lane_f32(__s0_406, -__s1_406, __s2_406, __p3_406); \ + __ret_406; \ }) #else -#define vfmss_lane_f32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \ - float32_t __ret_511; \ - float32_t __s0_511 = __p0_511; \ - float32_t __s1_511 = __p1_511; \ - float32x2_t __s2_511 = __p2_511; \ - float32x2_t __rev2_511; __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, 1, 0); \ - __ret_511 = __noswap_vfmas_lane_f32(__s0_511, -__s1_511, __rev2_511, __p3_511); \ - __ret_511; \ +#define vfmss_lane_f32(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \ + float32_t __ret_407; \ + float32_t __s0_407 = __p0_407; \ + float32_t __s1_407 = __p1_407; \ + float32x2_t __s2_407 = __p2_407; \ + float32x2_t __rev2_407; __rev2_407 = __builtin_shufflevector(__s2_407, __s2_407, 1, 0); \ + __ret_407 = __noswap_vfmas_lane_f32(__s0_407, -__s1_407, __rev2_407, __p3_407); \ + __ret_407; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f64(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \ - float64x2_t __ret_512; \ - float64x2_t __s0_512 = __p0_512; \ - float64x2_t __s1_512 = __p1_512; \ - float64x1_t __s2_512 = __p2_512; \ - __ret_512 = vfmaq_lane_f64(__s0_512, -__s1_512, __s2_512, __p3_512); \ - __ret_512; \ +#define vfmsq_lane_f64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \ + float64x2_t __ret_408; \ + float64x2_t __s0_408 = __p0_408; \ + float64x2_t __s1_408 = __p1_408; \ + float64x1_t __s2_408 = __p2_408; \ + __ret_408 = vfmaq_lane_f64(__s0_408, -__s1_408, __s2_408, __p3_408); \ + __ret_408; \ }) #else -#define vfmsq_lane_f64(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \ - float64x2_t __ret_513; \ - float64x2_t __s0_513 = __p0_513; \ - float64x2_t __s1_513 = __p1_513; \ - float64x1_t __s2_513 = __p2_513; \ - float64x2_t __rev0_513; __rev0_513 = __builtin_shufflevector(__s0_513, __s0_513, 1, 0); \ - float64x2_t __rev1_513; __rev1_513 = 
__builtin_shufflevector(__s1_513, __s1_513, 1, 0); \ - __ret_513 = __noswap_vfmaq_lane_f64(__rev0_513, -__rev1_513, __s2_513, __p3_513); \ - __ret_513 = __builtin_shufflevector(__ret_513, __ret_513, 1, 0); \ - __ret_513; \ +#define vfmsq_lane_f64(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \ + float64x2_t __ret_409; \ + float64x2_t __s0_409 = __p0_409; \ + float64x2_t __s1_409 = __p1_409; \ + float64x1_t __s2_409 = __p2_409; \ + float64x2_t __rev0_409; __rev0_409 = __builtin_shufflevector(__s0_409, __s0_409, 1, 0); \ + float64x2_t __rev1_409; __rev1_409 = __builtin_shufflevector(__s1_409, __s1_409, 1, 0); \ + __ret_409 = __noswap_vfmaq_lane_f64(__rev0_409, -__rev1_409, __s2_409, __p3_409); \ + __ret_409 = __builtin_shufflevector(__ret_409, __ret_409, 1, 0); \ + __ret_409; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f32(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \ - float32x4_t __ret_514; \ - float32x4_t __s0_514 = __p0_514; \ - float32x4_t __s1_514 = __p1_514; \ - float32x2_t __s2_514 = __p2_514; \ - __ret_514 = vfmaq_lane_f32(__s0_514, -__s1_514, __s2_514, __p3_514); \ - __ret_514; \ +#define vfmsq_lane_f32(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \ + float32x4_t __ret_410; \ + float32x4_t __s0_410 = __p0_410; \ + float32x4_t __s1_410 = __p1_410; \ + float32x2_t __s2_410 = __p2_410; \ + __ret_410 = vfmaq_lane_f32(__s0_410, -__s1_410, __s2_410, __p3_410); \ + __ret_410; \ }) #else -#define vfmsq_lane_f32(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \ - float32x4_t __ret_515; \ - float32x4_t __s0_515 = __p0_515; \ - float32x4_t __s1_515 = __p1_515; \ - float32x2_t __s2_515 = __p2_515; \ - float32x4_t __rev0_515; __rev0_515 = __builtin_shufflevector(__s0_515, __s0_515, 3, 2, 1, 0); \ - float32x4_t __rev1_515; __rev1_515 = __builtin_shufflevector(__s1_515, __s1_515, 3, 2, 1, 0); \ - float32x2_t __rev2_515; __rev2_515 = __builtin_shufflevector(__s2_515, __s2_515, 1, 0); \ - __ret_515 = __noswap_vfmaq_lane_f32(__rev0_515, -__rev1_515, __rev2_515, __p3_515); \ - __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 3, 2, 1, 0); \ - __ret_515; \ +#define vfmsq_lane_f32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \ + float32x4_t __ret_411; \ + float32x4_t __s0_411 = __p0_411; \ + float32x4_t __s1_411 = __p1_411; \ + float32x2_t __s2_411 = __p2_411; \ + float32x4_t __rev0_411; __rev0_411 = __builtin_shufflevector(__s0_411, __s0_411, 3, 2, 1, 0); \ + float32x4_t __rev1_411; __rev1_411 = __builtin_shufflevector(__s1_411, __s1_411, 3, 2, 1, 0); \ + float32x2_t __rev2_411; __rev2_411 = __builtin_shufflevector(__s2_411, __s2_411, 1, 0); \ + __ret_411 = __noswap_vfmaq_lane_f32(__rev0_411, -__rev1_411, __rev2_411, __p3_411); \ + __ret_411 = __builtin_shufflevector(__ret_411, __ret_411, 3, 2, 1, 0); \ + __ret_411; \ }) #endif -#define vfms_lane_f64(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \ - float64x1_t __ret_516; \ - float64x1_t __s0_516 = __p0_516; \ - float64x1_t __s1_516 = __p1_516; \ - float64x1_t __s2_516 = __p2_516; \ - __ret_516 = vfma_lane_f64(__s0_516, -__s1_516, __s2_516, __p3_516); \ - __ret_516; \ +#define vfms_lane_f64(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \ + float64x1_t __ret_412; \ + float64x1_t __s0_412 = __p0_412; \ + float64x1_t __s1_412 = __p1_412; \ + float64x1_t __s2_412 = __p2_412; \ + __ret_412 = vfma_lane_f64(__s0_412, -__s1_412, __s2_412, __p3_412); \ + __ret_412; \ }) #ifdef __LITTLE_ENDIAN__ -#define vfms_lane_f32(__p0_517, __p1_517, __p2_517, __p3_517) 
__extension__ ({ \ - float32x2_t __ret_517; \ - float32x2_t __s0_517 = __p0_517; \ - float32x2_t __s1_517 = __p1_517; \ - float32x2_t __s2_517 = __p2_517; \ - __ret_517 = vfma_lane_f32(__s0_517, -__s1_517, __s2_517, __p3_517); \ - __ret_517; \ +#define vfms_lane_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \ + float32x2_t __ret_413; \ + float32x2_t __s0_413 = __p0_413; \ + float32x2_t __s1_413 = __p1_413; \ + float32x2_t __s2_413 = __p2_413; \ + __ret_413 = vfma_lane_f32(__s0_413, -__s1_413, __s2_413, __p3_413); \ + __ret_413; \ }) #else -#define vfms_lane_f32(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \ - float32x2_t __ret_518; \ - float32x2_t __s0_518 = __p0_518; \ - float32x2_t __s1_518 = __p1_518; \ - float32x2_t __s2_518 = __p2_518; \ - float32x2_t __rev0_518; __rev0_518 = __builtin_shufflevector(__s0_518, __s0_518, 1, 0); \ - float32x2_t __rev1_518; __rev1_518 = __builtin_shufflevector(__s1_518, __s1_518, 1, 0); \ - float32x2_t __rev2_518; __rev2_518 = __builtin_shufflevector(__s2_518, __s2_518, 1, 0); \ - __ret_518 = __noswap_vfma_lane_f32(__rev0_518, -__rev1_518, __rev2_518, __p3_518); \ - __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 1, 0); \ - __ret_518; \ +#define vfms_lane_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \ + float32x2_t __ret_414; \ + float32x2_t __s0_414 = __p0_414; \ + float32x2_t __s1_414 = __p1_414; \ + float32x2_t __s2_414 = __p2_414; \ + float32x2_t __rev0_414; __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 1, 0); \ + float32x2_t __rev1_414; __rev1_414 = __builtin_shufflevector(__s1_414, __s1_414, 1, 0); \ + float32x2_t __rev2_414; __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \ + __ret_414 = __noswap_vfma_lane_f32(__rev0_414, -__rev1_414, __rev2_414, __p3_414); \ + __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 1, 0); \ + __ret_414; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsd_laneq_f64(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \ - float64_t __ret_519; \ - float64_t __s0_519 = __p0_519; \ - float64_t __s1_519 = __p1_519; \ - float64x2_t __s2_519 = __p2_519; \ - __ret_519 = vfmad_laneq_f64(__s0_519, -__s1_519, __s2_519, __p3_519); \ - __ret_519; \ +#define vfmsd_laneq_f64(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \ + float64_t __ret_415; \ + float64_t __s0_415 = __p0_415; \ + float64_t __s1_415 = __p1_415; \ + float64x2_t __s2_415 = __p2_415; \ + __ret_415 = vfmad_laneq_f64(__s0_415, -__s1_415, __s2_415, __p3_415); \ + __ret_415; \ }) #else -#define vfmsd_laneq_f64(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \ - float64_t __ret_520; \ - float64_t __s0_520 = __p0_520; \ - float64_t __s1_520 = __p1_520; \ - float64x2_t __s2_520 = __p2_520; \ - float64x2_t __rev2_520; __rev2_520 = __builtin_shufflevector(__s2_520, __s2_520, 1, 0); \ - __ret_520 = __noswap_vfmad_laneq_f64(__s0_520, -__s1_520, __rev2_520, __p3_520); \ - __ret_520; \ +#define vfmsd_laneq_f64(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \ + float64_t __ret_416; \ + float64_t __s0_416 = __p0_416; \ + float64_t __s1_416 = __p1_416; \ + float64x2_t __s2_416 = __p2_416; \ + float64x2_t __rev2_416; __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 1, 0); \ + __ret_416 = __noswap_vfmad_laneq_f64(__s0_416, -__s1_416, __rev2_416, __p3_416); \ + __ret_416; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vfmss_laneq_f32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \ - float32_t __ret_521; \ - float32_t __s0_521 = __p0_521; \ - float32_t 
__s1_521 = __p1_521; \ - float32x4_t __s2_521 = __p2_521; \ - __ret_521 = vfmas_laneq_f32(__s0_521, -__s1_521, __s2_521, __p3_521); \ - __ret_521; \ +#define vfmss_laneq_f32(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \ + float32_t __ret_417; \ + float32_t __s0_417 = __p0_417; \ + float32_t __s1_417 = __p1_417; \ + float32x4_t __s2_417 = __p2_417; \ + __ret_417 = vfmas_laneq_f32(__s0_417, -__s1_417, __s2_417, __p3_417); \ + __ret_417; \ }) #else -#define vfmss_laneq_f32(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \ - float32_t __ret_522; \ - float32_t __s0_522 = __p0_522; \ - float32_t __s1_522 = __p1_522; \ - float32x4_t __s2_522 = __p2_522; \ - float32x4_t __rev2_522; __rev2_522 = __builtin_shufflevector(__s2_522, __s2_522, 3, 2, 1, 0); \ - __ret_522 = __noswap_vfmas_laneq_f32(__s0_522, -__s1_522, __rev2_522, __p3_522); \ - __ret_522; \ +#define vfmss_laneq_f32(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \ + float32_t __ret_418; \ + float32_t __s0_418 = __p0_418; \ + float32_t __s1_418 = __p1_418; \ + float32x4_t __s2_418 = __p2_418; \ + float32x4_t __rev2_418; __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 3, 2, 1, 0); \ + __ret_418 = __noswap_vfmas_laneq_f32(__s0_418, -__s1_418, __rev2_418, __p3_418); \ + __ret_418; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f64(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \ - float64x2_t __ret_523; \ - float64x2_t __s0_523 = __p0_523; \ - float64x2_t __s1_523 = __p1_523; \ - float64x2_t __s2_523 = __p2_523; \ - __ret_523 = vfmaq_laneq_f64(__s0_523, -__s1_523, __s2_523, __p3_523); \ - __ret_523; \ +#define vfmsq_laneq_f64(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \ + float64x2_t __ret_419; \ + float64x2_t __s0_419 = __p0_419; \ + float64x2_t __s1_419 = __p1_419; \ + float64x2_t __s2_419 = __p2_419; \ + __ret_419 = vfmaq_laneq_f64(__s0_419, -__s1_419, __s2_419, __p3_419); \ + __ret_419; \ }) #else -#define vfmsq_laneq_f64(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \ - float64x2_t __ret_524; \ - float64x2_t __s0_524 = __p0_524; \ - float64x2_t __s1_524 = __p1_524; \ - float64x2_t __s2_524 = __p2_524; \ - float64x2_t __rev0_524; __rev0_524 = __builtin_shufflevector(__s0_524, __s0_524, 1, 0); \ - float64x2_t __rev1_524; __rev1_524 = __builtin_shufflevector(__s1_524, __s1_524, 1, 0); \ - float64x2_t __rev2_524; __rev2_524 = __builtin_shufflevector(__s2_524, __s2_524, 1, 0); \ - __ret_524 = __noswap_vfmaq_laneq_f64(__rev0_524, -__rev1_524, __rev2_524, __p3_524); \ - __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 1, 0); \ - __ret_524; \ +#define vfmsq_laneq_f64(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \ + float64x2_t __ret_420; \ + float64x2_t __s0_420 = __p0_420; \ + float64x2_t __s1_420 = __p1_420; \ + float64x2_t __s2_420 = __p2_420; \ + float64x2_t __rev0_420; __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \ + float64x2_t __rev1_420; __rev1_420 = __builtin_shufflevector(__s1_420, __s1_420, 1, 0); \ + float64x2_t __rev2_420; __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 1, 0); \ + __ret_420 = __noswap_vfmaq_laneq_f64(__rev0_420, -__rev1_420, __rev2_420, __p3_420); \ + __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \ + __ret_420; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \ - float32x4_t __ret_525; \ - float32x4_t __s0_525 = __p0_525; \ - float32x4_t __s1_525 = __p1_525; \ - float32x4_t __s2_525 = __p2_525; 
\ - __ret_525 = vfmaq_laneq_f32(__s0_525, -__s1_525, __s2_525, __p3_525); \ - __ret_525; \ +#define vfmsq_laneq_f32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \ + float32x4_t __ret_421; \ + float32x4_t __s0_421 = __p0_421; \ + float32x4_t __s1_421 = __p1_421; \ + float32x4_t __s2_421 = __p2_421; \ + __ret_421 = vfmaq_laneq_f32(__s0_421, -__s1_421, __s2_421, __p3_421); \ + __ret_421; \ }) #else -#define vfmsq_laneq_f32(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \ - float32x4_t __ret_526; \ - float32x4_t __s0_526 = __p0_526; \ - float32x4_t __s1_526 = __p1_526; \ - float32x4_t __s2_526 = __p2_526; \ - float32x4_t __rev0_526; __rev0_526 = __builtin_shufflevector(__s0_526, __s0_526, 3, 2, 1, 0); \ - float32x4_t __rev1_526; __rev1_526 = __builtin_shufflevector(__s1_526, __s1_526, 3, 2, 1, 0); \ - float32x4_t __rev2_526; __rev2_526 = __builtin_shufflevector(__s2_526, __s2_526, 3, 2, 1, 0); \ - __ret_526 = __noswap_vfmaq_laneq_f32(__rev0_526, -__rev1_526, __rev2_526, __p3_526); \ - __ret_526 = __builtin_shufflevector(__ret_526, __ret_526, 3, 2, 1, 0); \ - __ret_526; \ +#define vfmsq_laneq_f32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \ + float32x4_t __ret_422; \ + float32x4_t __s0_422 = __p0_422; \ + float32x4_t __s1_422 = __p1_422; \ + float32x4_t __s2_422 = __p2_422; \ + float32x4_t __rev0_422; __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 3, 2, 1, 0); \ + float32x4_t __rev1_422; __rev1_422 = __builtin_shufflevector(__s1_422, __s1_422, 3, 2, 1, 0); \ + float32x4_t __rev2_422; __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \ + __ret_422 = __noswap_vfmaq_laneq_f32(__rev0_422, -__rev1_422, __rev2_422, __p3_422); \ + __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 3, 2, 1, 0); \ + __ret_422; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f64(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \ - float64x1_t __ret_527; \ - float64x1_t __s0_527 = __p0_527; \ - float64x1_t __s1_527 = __p1_527; \ - float64x2_t __s2_527 = __p2_527; \ - __ret_527 = vfma_laneq_f64(__s0_527, -__s1_527, __s2_527, __p3_527); \ - __ret_527; \ +#define vfms_laneq_f64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \ + float64x1_t __ret_423; \ + float64x1_t __s0_423 = __p0_423; \ + float64x1_t __s1_423 = __p1_423; \ + float64x2_t __s2_423 = __p2_423; \ + __ret_423 = vfma_laneq_f64(__s0_423, -__s1_423, __s2_423, __p3_423); \ + __ret_423; \ }) #else -#define vfms_laneq_f64(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \ - float64x1_t __ret_528; \ - float64x1_t __s0_528 = __p0_528; \ - float64x1_t __s1_528 = __p1_528; \ - float64x2_t __s2_528 = __p2_528; \ - float64x2_t __rev2_528; __rev2_528 = __builtin_shufflevector(__s2_528, __s2_528, 1, 0); \ - __ret_528 = __noswap_vfma_laneq_f64(__s0_528, -__s1_528, __rev2_528, __p3_528); \ - __ret_528; \ +#define vfms_laneq_f64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \ + float64x1_t __ret_424; \ + float64x1_t __s0_424 = __p0_424; \ + float64x1_t __s1_424 = __p1_424; \ + float64x2_t __s2_424 = __p2_424; \ + float64x2_t __rev2_424; __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \ + __ret_424 = __noswap_vfma_laneq_f64(__s0_424, -__s1_424, __rev2_424, __p3_424); \ + __ret_424; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \ - float32x2_t __ret_529; \ - float32x2_t __s0_529 = __p0_529; \ - float32x2_t __s1_529 = __p1_529; \ - float32x4_t __s2_529 = __p2_529; \ - __ret_529 
= vfma_laneq_f32(__s0_529, -__s1_529, __s2_529, __p3_529); \ - __ret_529; \ +#define vfms_laneq_f32(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \ + float32x2_t __ret_425; \ + float32x2_t __s0_425 = __p0_425; \ + float32x2_t __s1_425 = __p1_425; \ + float32x4_t __s2_425 = __p2_425; \ + __ret_425 = vfma_laneq_f32(__s0_425, -__s1_425, __s2_425, __p3_425); \ + __ret_425; \ }) #else -#define vfms_laneq_f32(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \ - float32x2_t __ret_530; \ - float32x2_t __s0_530 = __p0_530; \ - float32x2_t __s1_530 = __p1_530; \ - float32x4_t __s2_530 = __p2_530; \ - float32x2_t __rev0_530; __rev0_530 = __builtin_shufflevector(__s0_530, __s0_530, 1, 0); \ - float32x2_t __rev1_530; __rev1_530 = __builtin_shufflevector(__s1_530, __s1_530, 1, 0); \ - float32x4_t __rev2_530; __rev2_530 = __builtin_shufflevector(__s2_530, __s2_530, 3, 2, 1, 0); \ - __ret_530 = __noswap_vfma_laneq_f32(__rev0_530, -__rev1_530, __rev2_530, __p3_530); \ - __ret_530 = __builtin_shufflevector(__ret_530, __ret_530, 1, 0); \ - __ret_530; \ +#define vfms_laneq_f32(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \ + float32x2_t __ret_426; \ + float32x2_t __s0_426 = __p0_426; \ + float32x2_t __s1_426 = __p1_426; \ + float32x4_t __s2_426 = __p2_426; \ + float32x2_t __rev0_426; __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 1, 0); \ + float32x2_t __rev1_426; __rev1_426 = __builtin_shufflevector(__s1_426, __s1_426, 1, 0); \ + float32x4_t __rev2_426; __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 3, 2, 1, 0); \ + __ret_426 = __noswap_vfma_laneq_f32(__rev0_426, -__rev1_426, __rev2_426, __p3_426); \ + __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 1, 0); \ + __ret_426; \ }) #endif @@ -55352,530 +49445,530 @@ __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) return __ret; } #ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_u32(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \ - uint32x4_t __ret_531; \ - uint32x4_t __s0_531 = __p0_531; \ - uint32x4_t __s1_531 = __p1_531; \ - uint32x4_t __s2_531 = __p2_531; \ - __ret_531 = __s0_531 + __s1_531 * splatq_laneq_u32(__s2_531, __p3_531); \ - __ret_531; \ +#define vmlaq_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \ + uint32x4_t __ret_427; \ + uint32x4_t __s0_427 = __p0_427; \ + uint32x4_t __s1_427 = __p1_427; \ + uint32x4_t __s2_427 = __p2_427; \ + __ret_427 = __s0_427 + __s1_427 * splatq_laneq_u32(__s2_427, __p3_427); \ + __ret_427; \ }) #else -#define vmlaq_laneq_u32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \ - uint32x4_t __ret_532; \ - uint32x4_t __s0_532 = __p0_532; \ - uint32x4_t __s1_532 = __p1_532; \ - uint32x4_t __s2_532 = __p2_532; \ - uint32x4_t __rev0_532; __rev0_532 = __builtin_shufflevector(__s0_532, __s0_532, 3, 2, 1, 0); \ - uint32x4_t __rev1_532; __rev1_532 = __builtin_shufflevector(__s1_532, __s1_532, 3, 2, 1, 0); \ - uint32x4_t __rev2_532; __rev2_532 = __builtin_shufflevector(__s2_532, __s2_532, 3, 2, 1, 0); \ - __ret_532 = __rev0_532 + __rev1_532 * __noswap_splatq_laneq_u32(__rev2_532, __p3_532); \ - __ret_532 = __builtin_shufflevector(__ret_532, __ret_532, 3, 2, 1, 0); \ - __ret_532; \ +#define vmlaq_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \ + uint32x4_t __ret_428; \ + uint32x4_t __s0_428 = __p0_428; \ + uint32x4_t __s1_428 = __p1_428; \ + uint32x4_t __s2_428 = __p2_428; \ + uint32x4_t __rev0_428; __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 3, 2, 1, 0); \ + uint32x4_t 
__rev1_428; __rev1_428 = __builtin_shufflevector(__s1_428, __s1_428, 3, 2, 1, 0); \ + uint32x4_t __rev2_428; __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \ + __ret_428 = __rev0_428 + __rev1_428 * __noswap_splatq_laneq_u32(__rev2_428, __p3_428); \ + __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 3, 2, 1, 0); \ + __ret_428; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_u16(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \ - uint16x8_t __ret_533; \ - uint16x8_t __s0_533 = __p0_533; \ - uint16x8_t __s1_533 = __p1_533; \ - uint16x8_t __s2_533 = __p2_533; \ - __ret_533 = __s0_533 + __s1_533 * splatq_laneq_u16(__s2_533, __p3_533); \ - __ret_533; \ +#define vmlaq_laneq_u16(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \ + uint16x8_t __ret_429; \ + uint16x8_t __s0_429 = __p0_429; \ + uint16x8_t __s1_429 = __p1_429; \ + uint16x8_t __s2_429 = __p2_429; \ + __ret_429 = __s0_429 + __s1_429 * splatq_laneq_u16(__s2_429, __p3_429); \ + __ret_429; \ }) #else -#define vmlaq_laneq_u16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \ - uint16x8_t __ret_534; \ - uint16x8_t __s0_534 = __p0_534; \ - uint16x8_t __s1_534 = __p1_534; \ - uint16x8_t __s2_534 = __p2_534; \ - uint16x8_t __rev0_534; __rev0_534 = __builtin_shufflevector(__s0_534, __s0_534, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_534; __rev1_534 = __builtin_shufflevector(__s1_534, __s1_534, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_534; __rev2_534 = __builtin_shufflevector(__s2_534, __s2_534, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_534 = __rev0_534 + __rev1_534 * __noswap_splatq_laneq_u16(__rev2_534, __p3_534); \ - __ret_534 = __builtin_shufflevector(__ret_534, __ret_534, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_534; \ +#define vmlaq_laneq_u16(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \ + uint16x8_t __ret_430; \ + uint16x8_t __s0_430 = __p0_430; \ + uint16x8_t __s1_430 = __p1_430; \ + uint16x8_t __s2_430 = __p2_430; \ + uint16x8_t __rev0_430; __rev0_430 = __builtin_shufflevector(__s0_430, __s0_430, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_430; __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_430; __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_430 = __rev0_430 + __rev1_430 * __noswap_splatq_laneq_u16(__rev2_430, __p3_430); \ + __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_430; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_f32(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \ - float32x4_t __ret_535; \ - float32x4_t __s0_535 = __p0_535; \ - float32x4_t __s1_535 = __p1_535; \ - float32x4_t __s2_535 = __p2_535; \ - __ret_535 = __s0_535 + __s1_535 * splatq_laneq_f32(__s2_535, __p3_535); \ - __ret_535; \ +#define vmlaq_laneq_f32(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \ + float32x4_t __ret_431; \ + float32x4_t __s0_431 = __p0_431; \ + float32x4_t __s1_431 = __p1_431; \ + float32x4_t __s2_431 = __p2_431; \ + __ret_431 = __s0_431 + __s1_431 * splatq_laneq_f32(__s2_431, __p3_431); \ + __ret_431; \ }) #else -#define vmlaq_laneq_f32(__p0_536, __p1_536, __p2_536, __p3_536) __extension__ ({ \ - float32x4_t __ret_536; \ - float32x4_t __s0_536 = __p0_536; \ - float32x4_t __s1_536 = __p1_536; \ - float32x4_t __s2_536 = __p2_536; \ - float32x4_t __rev0_536; __rev0_536 = __builtin_shufflevector(__s0_536, __s0_536, 3, 2, 1, 0); \ - float32x4_t __rev1_536; __rev1_536 = 
__builtin_shufflevector(__s1_536, __s1_536, 3, 2, 1, 0); \ - float32x4_t __rev2_536; __rev2_536 = __builtin_shufflevector(__s2_536, __s2_536, 3, 2, 1, 0); \ - __ret_536 = __rev0_536 + __rev1_536 * __noswap_splatq_laneq_f32(__rev2_536, __p3_536); \ - __ret_536 = __builtin_shufflevector(__ret_536, __ret_536, 3, 2, 1, 0); \ - __ret_536; \ +#define vmlaq_laneq_f32(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \ + float32x4_t __ret_432; \ + float32x4_t __s0_432 = __p0_432; \ + float32x4_t __s1_432 = __p1_432; \ + float32x4_t __s2_432 = __p2_432; \ + float32x4_t __rev0_432; __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \ + float32x4_t __rev1_432; __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 3, 2, 1, 0); \ + float32x4_t __rev2_432; __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 3, 2, 1, 0); \ + __ret_432 = __rev0_432 + __rev1_432 * __noswap_splatq_laneq_f32(__rev2_432, __p3_432); \ + __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \ + __ret_432; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_s32(__p0_537, __p1_537, __p2_537, __p3_537) __extension__ ({ \ - int32x4_t __ret_537; \ - int32x4_t __s0_537 = __p0_537; \ - int32x4_t __s1_537 = __p1_537; \ - int32x4_t __s2_537 = __p2_537; \ - __ret_537 = __s0_537 + __s1_537 * splatq_laneq_s32(__s2_537, __p3_537); \ - __ret_537; \ +#define vmlaq_laneq_s32(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \ + int32x4_t __ret_433; \ + int32x4_t __s0_433 = __p0_433; \ + int32x4_t __s1_433 = __p1_433; \ + int32x4_t __s2_433 = __p2_433; \ + __ret_433 = __s0_433 + __s1_433 * splatq_laneq_s32(__s2_433, __p3_433); \ + __ret_433; \ }) #else -#define vmlaq_laneq_s32(__p0_538, __p1_538, __p2_538, __p3_538) __extension__ ({ \ - int32x4_t __ret_538; \ - int32x4_t __s0_538 = __p0_538; \ - int32x4_t __s1_538 = __p1_538; \ - int32x4_t __s2_538 = __p2_538; \ - int32x4_t __rev0_538; __rev0_538 = __builtin_shufflevector(__s0_538, __s0_538, 3, 2, 1, 0); \ - int32x4_t __rev1_538; __rev1_538 = __builtin_shufflevector(__s1_538, __s1_538, 3, 2, 1, 0); \ - int32x4_t __rev2_538; __rev2_538 = __builtin_shufflevector(__s2_538, __s2_538, 3, 2, 1, 0); \ - __ret_538 = __rev0_538 + __rev1_538 * __noswap_splatq_laneq_s32(__rev2_538, __p3_538); \ - __ret_538 = __builtin_shufflevector(__ret_538, __ret_538, 3, 2, 1, 0); \ - __ret_538; \ +#define vmlaq_laneq_s32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \ + int32x4_t __ret_434; \ + int32x4_t __s0_434 = __p0_434; \ + int32x4_t __s1_434 = __p1_434; \ + int32x4_t __s2_434 = __p2_434; \ + int32x4_t __rev0_434; __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 3, 2, 1, 0); \ + int32x4_t __rev1_434; __rev1_434 = __builtin_shufflevector(__s1_434, __s1_434, 3, 2, 1, 0); \ + int32x4_t __rev2_434; __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 3, 2, 1, 0); \ + __ret_434 = __rev0_434 + __rev1_434 * __noswap_splatq_laneq_s32(__rev2_434, __p3_434); \ + __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 3, 2, 1, 0); \ + __ret_434; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_s16(__p0_539, __p1_539, __p2_539, __p3_539) __extension__ ({ \ - int16x8_t __ret_539; \ - int16x8_t __s0_539 = __p0_539; \ - int16x8_t __s1_539 = __p1_539; \ - int16x8_t __s2_539 = __p2_539; \ - __ret_539 = __s0_539 + __s1_539 * splatq_laneq_s16(__s2_539, __p3_539); \ - __ret_539; \ +#define vmlaq_laneq_s16(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \ + int16x8_t __ret_435; \ + int16x8_t __s0_435 = __p0_435; \ + int16x8_t 
__s1_435 = __p1_435; \ + int16x8_t __s2_435 = __p2_435; \ + __ret_435 = __s0_435 + __s1_435 * splatq_laneq_s16(__s2_435, __p3_435); \ + __ret_435; \ }) #else -#define vmlaq_laneq_s16(__p0_540, __p1_540, __p2_540, __p3_540) __extension__ ({ \ - int16x8_t __ret_540; \ - int16x8_t __s0_540 = __p0_540; \ - int16x8_t __s1_540 = __p1_540; \ - int16x8_t __s2_540 = __p2_540; \ - int16x8_t __rev0_540; __rev0_540 = __builtin_shufflevector(__s0_540, __s0_540, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_540; __rev1_540 = __builtin_shufflevector(__s1_540, __s1_540, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_540; __rev2_540 = __builtin_shufflevector(__s2_540, __s2_540, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_540 = __rev0_540 + __rev1_540 * __noswap_splatq_laneq_s16(__rev2_540, __p3_540); \ - __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_540; \ +#define vmlaq_laneq_s16(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \ + int16x8_t __ret_436; \ + int16x8_t __s0_436 = __p0_436; \ + int16x8_t __s1_436 = __p1_436; \ + int16x8_t __s2_436 = __p2_436; \ + int16x8_t __rev0_436; __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_436; __rev1_436 = __builtin_shufflevector(__s1_436, __s1_436, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_436; __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_436 = __rev0_436 + __rev1_436 * __noswap_splatq_laneq_s16(__rev2_436, __p3_436); \ + __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_436; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_u32(__p0_541, __p1_541, __p2_541, __p3_541) __extension__ ({ \ - uint32x2_t __ret_541; \ - uint32x2_t __s0_541 = __p0_541; \ - uint32x2_t __s1_541 = __p1_541; \ - uint32x4_t __s2_541 = __p2_541; \ - __ret_541 = __s0_541 + __s1_541 * splat_laneq_u32(__s2_541, __p3_541); \ - __ret_541; \ +#define vmla_laneq_u32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \ + uint32x2_t __ret_437; \ + uint32x2_t __s0_437 = __p0_437; \ + uint32x2_t __s1_437 = __p1_437; \ + uint32x4_t __s2_437 = __p2_437; \ + __ret_437 = __s0_437 + __s1_437 * splat_laneq_u32(__s2_437, __p3_437); \ + __ret_437; \ }) #else -#define vmla_laneq_u32(__p0_542, __p1_542, __p2_542, __p3_542) __extension__ ({ \ - uint32x2_t __ret_542; \ - uint32x2_t __s0_542 = __p0_542; \ - uint32x2_t __s1_542 = __p1_542; \ - uint32x4_t __s2_542 = __p2_542; \ - uint32x2_t __rev0_542; __rev0_542 = __builtin_shufflevector(__s0_542, __s0_542, 1, 0); \ - uint32x2_t __rev1_542; __rev1_542 = __builtin_shufflevector(__s1_542, __s1_542, 1, 0); \ - uint32x4_t __rev2_542; __rev2_542 = __builtin_shufflevector(__s2_542, __s2_542, 3, 2, 1, 0); \ - __ret_542 = __rev0_542 + __rev1_542 * __noswap_splat_laneq_u32(__rev2_542, __p3_542); \ - __ret_542 = __builtin_shufflevector(__ret_542, __ret_542, 1, 0); \ - __ret_542; \ +#define vmla_laneq_u32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \ + uint32x2_t __ret_438; \ + uint32x2_t __s0_438 = __p0_438; \ + uint32x2_t __s1_438 = __p1_438; \ + uint32x4_t __s2_438 = __p2_438; \ + uint32x2_t __rev0_438; __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \ + uint32x2_t __rev1_438; __rev1_438 = __builtin_shufflevector(__s1_438, __s1_438, 1, 0); \ + uint32x4_t __rev2_438; __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \ + __ret_438 = __rev0_438 + __rev1_438 * __noswap_splat_laneq_u32(__rev2_438, __p3_438); \ + __ret_438 = 
__builtin_shufflevector(__ret_438, __ret_438, 1, 0); \ + __ret_438; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_u16(__p0_543, __p1_543, __p2_543, __p3_543) __extension__ ({ \ - uint16x4_t __ret_543; \ - uint16x4_t __s0_543 = __p0_543; \ - uint16x4_t __s1_543 = __p1_543; \ - uint16x8_t __s2_543 = __p2_543; \ - __ret_543 = __s0_543 + __s1_543 * splat_laneq_u16(__s2_543, __p3_543); \ - __ret_543; \ +#define vmla_laneq_u16(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \ + uint16x4_t __ret_439; \ + uint16x4_t __s0_439 = __p0_439; \ + uint16x4_t __s1_439 = __p1_439; \ + uint16x8_t __s2_439 = __p2_439; \ + __ret_439 = __s0_439 + __s1_439 * splat_laneq_u16(__s2_439, __p3_439); \ + __ret_439; \ }) #else -#define vmla_laneq_u16(__p0_544, __p1_544, __p2_544, __p3_544) __extension__ ({ \ - uint16x4_t __ret_544; \ - uint16x4_t __s0_544 = __p0_544; \ - uint16x4_t __s1_544 = __p1_544; \ - uint16x8_t __s2_544 = __p2_544; \ - uint16x4_t __rev0_544; __rev0_544 = __builtin_shufflevector(__s0_544, __s0_544, 3, 2, 1, 0); \ - uint16x4_t __rev1_544; __rev1_544 = __builtin_shufflevector(__s1_544, __s1_544, 3, 2, 1, 0); \ - uint16x8_t __rev2_544; __rev2_544 = __builtin_shufflevector(__s2_544, __s2_544, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_544 = __rev0_544 + __rev1_544 * __noswap_splat_laneq_u16(__rev2_544, __p3_544); \ - __ret_544 = __builtin_shufflevector(__ret_544, __ret_544, 3, 2, 1, 0); \ - __ret_544; \ +#define vmla_laneq_u16(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \ + uint16x4_t __ret_440; \ + uint16x4_t __s0_440 = __p0_440; \ + uint16x4_t __s1_440 = __p1_440; \ + uint16x8_t __s2_440 = __p2_440; \ + uint16x4_t __rev0_440; __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 3, 2, 1, 0); \ + uint16x4_t __rev1_440; __rev1_440 = __builtin_shufflevector(__s1_440, __s1_440, 3, 2, 1, 0); \ + uint16x8_t __rev2_440; __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_440 = __rev0_440 + __rev1_440 * __noswap_splat_laneq_u16(__rev2_440, __p3_440); \ + __ret_440 = __builtin_shufflevector(__ret_440, __ret_440, 3, 2, 1, 0); \ + __ret_440; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_f32(__p0_545, __p1_545, __p2_545, __p3_545) __extension__ ({ \ - float32x2_t __ret_545; \ - float32x2_t __s0_545 = __p0_545; \ - float32x2_t __s1_545 = __p1_545; \ - float32x4_t __s2_545 = __p2_545; \ - __ret_545 = __s0_545 + __s1_545 * splat_laneq_f32(__s2_545, __p3_545); \ - __ret_545; \ +#define vmla_laneq_f32(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \ + float32x2_t __ret_441; \ + float32x2_t __s0_441 = __p0_441; \ + float32x2_t __s1_441 = __p1_441; \ + float32x4_t __s2_441 = __p2_441; \ + __ret_441 = __s0_441 + __s1_441 * splat_laneq_f32(__s2_441, __p3_441); \ + __ret_441; \ }) #else -#define vmla_laneq_f32(__p0_546, __p1_546, __p2_546, __p3_546) __extension__ ({ \ - float32x2_t __ret_546; \ - float32x2_t __s0_546 = __p0_546; \ - float32x2_t __s1_546 = __p1_546; \ - float32x4_t __s2_546 = __p2_546; \ - float32x2_t __rev0_546; __rev0_546 = __builtin_shufflevector(__s0_546, __s0_546, 1, 0); \ - float32x2_t __rev1_546; __rev1_546 = __builtin_shufflevector(__s1_546, __s1_546, 1, 0); \ - float32x4_t __rev2_546; __rev2_546 = __builtin_shufflevector(__s2_546, __s2_546, 3, 2, 1, 0); \ - __ret_546 = __rev0_546 + __rev1_546 * __noswap_splat_laneq_f32(__rev2_546, __p3_546); \ - __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 1, 0); \ - __ret_546; \ +#define vmla_laneq_f32(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \ 
+ float32x2_t __ret_442; \ + float32x2_t __s0_442 = __p0_442; \ + float32x2_t __s1_442 = __p1_442; \ + float32x4_t __s2_442 = __p2_442; \ + float32x2_t __rev0_442; __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 1, 0); \ + float32x2_t __rev1_442; __rev1_442 = __builtin_shufflevector(__s1_442, __s1_442, 1, 0); \ + float32x4_t __rev2_442; __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 3, 2, 1, 0); \ + __ret_442 = __rev0_442 + __rev1_442 * __noswap_splat_laneq_f32(__rev2_442, __p3_442); \ + __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 1, 0); \ + __ret_442; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_s32(__p0_547, __p1_547, __p2_547, __p3_547) __extension__ ({ \ - int32x2_t __ret_547; \ - int32x2_t __s0_547 = __p0_547; \ - int32x2_t __s1_547 = __p1_547; \ - int32x4_t __s2_547 = __p2_547; \ - __ret_547 = __s0_547 + __s1_547 * splat_laneq_s32(__s2_547, __p3_547); \ - __ret_547; \ +#define vmla_laneq_s32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \ + int32x2_t __ret_443; \ + int32x2_t __s0_443 = __p0_443; \ + int32x2_t __s1_443 = __p1_443; \ + int32x4_t __s2_443 = __p2_443; \ + __ret_443 = __s0_443 + __s1_443 * splat_laneq_s32(__s2_443, __p3_443); \ + __ret_443; \ }) #else -#define vmla_laneq_s32(__p0_548, __p1_548, __p2_548, __p3_548) __extension__ ({ \ - int32x2_t __ret_548; \ - int32x2_t __s0_548 = __p0_548; \ - int32x2_t __s1_548 = __p1_548; \ - int32x4_t __s2_548 = __p2_548; \ - int32x2_t __rev0_548; __rev0_548 = __builtin_shufflevector(__s0_548, __s0_548, 1, 0); \ - int32x2_t __rev1_548; __rev1_548 = __builtin_shufflevector(__s1_548, __s1_548, 1, 0); \ - int32x4_t __rev2_548; __rev2_548 = __builtin_shufflevector(__s2_548, __s2_548, 3, 2, 1, 0); \ - __ret_548 = __rev0_548 + __rev1_548 * __noswap_splat_laneq_s32(__rev2_548, __p3_548); \ - __ret_548 = __builtin_shufflevector(__ret_548, __ret_548, 1, 0); \ - __ret_548; \ +#define vmla_laneq_s32(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \ + int32x2_t __ret_444; \ + int32x2_t __s0_444 = __p0_444; \ + int32x2_t __s1_444 = __p1_444; \ + int32x4_t __s2_444 = __p2_444; \ + int32x2_t __rev0_444; __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 1, 0); \ + int32x2_t __rev1_444; __rev1_444 = __builtin_shufflevector(__s1_444, __s1_444, 1, 0); \ + int32x4_t __rev2_444; __rev2_444 = __builtin_shufflevector(__s2_444, __s2_444, 3, 2, 1, 0); \ + __ret_444 = __rev0_444 + __rev1_444 * __noswap_splat_laneq_s32(__rev2_444, __p3_444); \ + __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 1, 0); \ + __ret_444; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_s16(__p0_549, __p1_549, __p2_549, __p3_549) __extension__ ({ \ - int16x4_t __ret_549; \ - int16x4_t __s0_549 = __p0_549; \ - int16x4_t __s1_549 = __p1_549; \ - int16x8_t __s2_549 = __p2_549; \ - __ret_549 = __s0_549 + __s1_549 * splat_laneq_s16(__s2_549, __p3_549); \ - __ret_549; \ +#define vmla_laneq_s16(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \ + int16x4_t __ret_445; \ + int16x4_t __s0_445 = __p0_445; \ + int16x4_t __s1_445 = __p1_445; \ + int16x8_t __s2_445 = __p2_445; \ + __ret_445 = __s0_445 + __s1_445 * splat_laneq_s16(__s2_445, __p3_445); \ + __ret_445; \ }) #else -#define vmla_laneq_s16(__p0_550, __p1_550, __p2_550, __p3_550) __extension__ ({ \ - int16x4_t __ret_550; \ - int16x4_t __s0_550 = __p0_550; \ - int16x4_t __s1_550 = __p1_550; \ - int16x8_t __s2_550 = __p2_550; \ - int16x4_t __rev0_550; __rev0_550 = __builtin_shufflevector(__s0_550, __s0_550, 3, 2, 1, 0); \ - int16x4_t __rev1_550; 
__rev1_550 = __builtin_shufflevector(__s1_550, __s1_550, 3, 2, 1, 0); \ - int16x8_t __rev2_550; __rev2_550 = __builtin_shufflevector(__s2_550, __s2_550, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_550 = __rev0_550 + __rev1_550 * __noswap_splat_laneq_s16(__rev2_550, __p3_550); \ - __ret_550 = __builtin_shufflevector(__ret_550, __ret_550, 3, 2, 1, 0); \ - __ret_550; \ +#define vmla_laneq_s16(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \ + int16x4_t __ret_446; \ + int16x4_t __s0_446 = __p0_446; \ + int16x4_t __s1_446 = __p1_446; \ + int16x8_t __s2_446 = __p2_446; \ + int16x4_t __rev0_446; __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 3, 2, 1, 0); \ + int16x4_t __rev1_446; __rev1_446 = __builtin_shufflevector(__s1_446, __s1_446, 3, 2, 1, 0); \ + int16x8_t __rev2_446; __rev2_446 = __builtin_shufflevector(__s2_446, __s2_446, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_446 = __rev0_446 + __rev1_446 * __noswap_splat_laneq_s16(__rev2_446, __p3_446); \ + __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 3, 2, 1, 0); \ + __ret_446; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_u32(__p0_551, __p1_551, __p2_551, __p3_551) __extension__ ({ \ - uint64x2_t __ret_551; \ - uint64x2_t __s0_551 = __p0_551; \ - uint32x4_t __s1_551 = __p1_551; \ - uint32x2_t __s2_551 = __p2_551; \ - __ret_551 = __s0_551 + vmull_u32(vget_high_u32(__s1_551), splat_lane_u32(__s2_551, __p3_551)); \ - __ret_551; \ +#define vmlal_high_lane_u32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \ + uint64x2_t __ret_447; \ + uint64x2_t __s0_447 = __p0_447; \ + uint32x4_t __s1_447 = __p1_447; \ + uint32x2_t __s2_447 = __p2_447; \ + __ret_447 = __s0_447 + vmull_u32(vget_high_u32(__s1_447), splat_lane_u32(__s2_447, __p3_447)); \ + __ret_447; \ }) #else -#define vmlal_high_lane_u32(__p0_552, __p1_552, __p2_552, __p3_552) __extension__ ({ \ - uint64x2_t __ret_552; \ - uint64x2_t __s0_552 = __p0_552; \ - uint32x4_t __s1_552 = __p1_552; \ - uint32x2_t __s2_552 = __p2_552; \ - uint64x2_t __rev0_552; __rev0_552 = __builtin_shufflevector(__s0_552, __s0_552, 1, 0); \ - uint32x4_t __rev1_552; __rev1_552 = __builtin_shufflevector(__s1_552, __s1_552, 3, 2, 1, 0); \ - uint32x2_t __rev2_552; __rev2_552 = __builtin_shufflevector(__s2_552, __s2_552, 1, 0); \ - __ret_552 = __rev0_552 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_552), __noswap_splat_lane_u32(__rev2_552, __p3_552)); \ - __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 1, 0); \ - __ret_552; \ +#define vmlal_high_lane_u32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \ + uint64x2_t __ret_448; \ + uint64x2_t __s0_448 = __p0_448; \ + uint32x4_t __s1_448 = __p1_448; \ + uint32x2_t __s2_448 = __p2_448; \ + uint64x2_t __rev0_448; __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 1, 0); \ + uint32x4_t __rev1_448; __rev1_448 = __builtin_shufflevector(__s1_448, __s1_448, 3, 2, 1, 0); \ + uint32x2_t __rev2_448; __rev2_448 = __builtin_shufflevector(__s2_448, __s2_448, 1, 0); \ + __ret_448 = __rev0_448 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_448), __noswap_splat_lane_u32(__rev2_448, __p3_448)); \ + __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 1, 0); \ + __ret_448; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_u16(__p0_553, __p1_553, __p2_553, __p3_553) __extension__ ({ \ - uint32x4_t __ret_553; \ - uint32x4_t __s0_553 = __p0_553; \ - uint16x8_t __s1_553 = __p1_553; \ - uint16x4_t __s2_553 = __p2_553; \ - __ret_553 = __s0_553 + vmull_u16(vget_high_u16(__s1_553), splat_lane_u16(__s2_553, 
__p3_553)); \ - __ret_553; \ +#define vmlal_high_lane_u16(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \ + uint32x4_t __ret_449; \ + uint32x4_t __s0_449 = __p0_449; \ + uint16x8_t __s1_449 = __p1_449; \ + uint16x4_t __s2_449 = __p2_449; \ + __ret_449 = __s0_449 + vmull_u16(vget_high_u16(__s1_449), splat_lane_u16(__s2_449, __p3_449)); \ + __ret_449; \ }) #else -#define vmlal_high_lane_u16(__p0_554, __p1_554, __p2_554, __p3_554) __extension__ ({ \ - uint32x4_t __ret_554; \ - uint32x4_t __s0_554 = __p0_554; \ - uint16x8_t __s1_554 = __p1_554; \ - uint16x4_t __s2_554 = __p2_554; \ - uint32x4_t __rev0_554; __rev0_554 = __builtin_shufflevector(__s0_554, __s0_554, 3, 2, 1, 0); \ - uint16x8_t __rev1_554; __rev1_554 = __builtin_shufflevector(__s1_554, __s1_554, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_554; __rev2_554 = __builtin_shufflevector(__s2_554, __s2_554, 3, 2, 1, 0); \ - __ret_554 = __rev0_554 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_554), __noswap_splat_lane_u16(__rev2_554, __p3_554)); \ - __ret_554 = __builtin_shufflevector(__ret_554, __ret_554, 3, 2, 1, 0); \ - __ret_554; \ +#define vmlal_high_lane_u16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \ + uint32x4_t __ret_450; \ + uint32x4_t __s0_450 = __p0_450; \ + uint16x8_t __s1_450 = __p1_450; \ + uint16x4_t __s2_450 = __p2_450; \ + uint32x4_t __rev0_450; __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 3, 2, 1, 0); \ + uint16x8_t __rev1_450; __rev1_450 = __builtin_shufflevector(__s1_450, __s1_450, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_450; __rev2_450 = __builtin_shufflevector(__s2_450, __s2_450, 3, 2, 1, 0); \ + __ret_450 = __rev0_450 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_450), __noswap_splat_lane_u16(__rev2_450, __p3_450)); \ + __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 3, 2, 1, 0); \ + __ret_450; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_s32(__p0_555, __p1_555, __p2_555, __p3_555) __extension__ ({ \ - int64x2_t __ret_555; \ - int64x2_t __s0_555 = __p0_555; \ - int32x4_t __s1_555 = __p1_555; \ - int32x2_t __s2_555 = __p2_555; \ - __ret_555 = __s0_555 + vmull_s32(vget_high_s32(__s1_555), splat_lane_s32(__s2_555, __p3_555)); \ - __ret_555; \ +#define vmlal_high_lane_s32(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \ + int64x2_t __ret_451; \ + int64x2_t __s0_451 = __p0_451; \ + int32x4_t __s1_451 = __p1_451; \ + int32x2_t __s2_451 = __p2_451; \ + __ret_451 = __s0_451 + vmull_s32(vget_high_s32(__s1_451), splat_lane_s32(__s2_451, __p3_451)); \ + __ret_451; \ }) #else -#define vmlal_high_lane_s32(__p0_556, __p1_556, __p2_556, __p3_556) __extension__ ({ \ - int64x2_t __ret_556; \ - int64x2_t __s0_556 = __p0_556; \ - int32x4_t __s1_556 = __p1_556; \ - int32x2_t __s2_556 = __p2_556; \ - int64x2_t __rev0_556; __rev0_556 = __builtin_shufflevector(__s0_556, __s0_556, 1, 0); \ - int32x4_t __rev1_556; __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 3, 2, 1, 0); \ - int32x2_t __rev2_556; __rev2_556 = __builtin_shufflevector(__s2_556, __s2_556, 1, 0); \ - __ret_556 = __rev0_556 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_556), __noswap_splat_lane_s32(__rev2_556, __p3_556)); \ - __ret_556 = __builtin_shufflevector(__ret_556, __ret_556, 1, 0); \ - __ret_556; \ +#define vmlal_high_lane_s32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \ + int64x2_t __ret_452; \ + int64x2_t __s0_452 = __p0_452; \ + int32x4_t __s1_452 = __p1_452; \ + int32x2_t __s2_452 = __p2_452; \ + int64x2_t __rev0_452; __rev0_452 = 
__builtin_shufflevector(__s0_452, __s0_452, 1, 0); \ + int32x4_t __rev1_452; __rev1_452 = __builtin_shufflevector(__s1_452, __s1_452, 3, 2, 1, 0); \ + int32x2_t __rev2_452; __rev2_452 = __builtin_shufflevector(__s2_452, __s2_452, 1, 0); \ + __ret_452 = __rev0_452 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_452), __noswap_splat_lane_s32(__rev2_452, __p3_452)); \ + __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 1, 0); \ + __ret_452; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_s16(__p0_557, __p1_557, __p2_557, __p3_557) __extension__ ({ \ - int32x4_t __ret_557; \ - int32x4_t __s0_557 = __p0_557; \ - int16x8_t __s1_557 = __p1_557; \ - int16x4_t __s2_557 = __p2_557; \ - __ret_557 = __s0_557 + vmull_s16(vget_high_s16(__s1_557), splat_lane_s16(__s2_557, __p3_557)); \ - __ret_557; \ +#define vmlal_high_lane_s16(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \ + int32x4_t __ret_453; \ + int32x4_t __s0_453 = __p0_453; \ + int16x8_t __s1_453 = __p1_453; \ + int16x4_t __s2_453 = __p2_453; \ + __ret_453 = __s0_453 + vmull_s16(vget_high_s16(__s1_453), splat_lane_s16(__s2_453, __p3_453)); \ + __ret_453; \ }) #else -#define vmlal_high_lane_s16(__p0_558, __p1_558, __p2_558, __p3_558) __extension__ ({ \ - int32x4_t __ret_558; \ - int32x4_t __s0_558 = __p0_558; \ - int16x8_t __s1_558 = __p1_558; \ - int16x4_t __s2_558 = __p2_558; \ - int32x4_t __rev0_558; __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 3, 2, 1, 0); \ - int16x8_t __rev1_558; __rev1_558 = __builtin_shufflevector(__s1_558, __s1_558, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_558; __rev2_558 = __builtin_shufflevector(__s2_558, __s2_558, 3, 2, 1, 0); \ - __ret_558 = __rev0_558 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_558), __noswap_splat_lane_s16(__rev2_558, __p3_558)); \ - __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 3, 2, 1, 0); \ - __ret_558; \ +#define vmlal_high_lane_s16(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \ + int32x4_t __ret_454; \ + int32x4_t __s0_454 = __p0_454; \ + int16x8_t __s1_454 = __p1_454; \ + int16x4_t __s2_454 = __p2_454; \ + int32x4_t __rev0_454; __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 3, 2, 1, 0); \ + int16x8_t __rev1_454; __rev1_454 = __builtin_shufflevector(__s1_454, __s1_454, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_454; __rev2_454 = __builtin_shufflevector(__s2_454, __s2_454, 3, 2, 1, 0); \ + __ret_454 = __rev0_454 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_454), __noswap_splat_lane_s16(__rev2_454, __p3_454)); \ + __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 3, 2, 1, 0); \ + __ret_454; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_u32(__p0_559, __p1_559, __p2_559, __p3_559) __extension__ ({ \ - uint64x2_t __ret_559; \ - uint64x2_t __s0_559 = __p0_559; \ - uint32x4_t __s1_559 = __p1_559; \ - uint32x4_t __s2_559 = __p2_559; \ - __ret_559 = __s0_559 + vmull_u32(vget_high_u32(__s1_559), splat_laneq_u32(__s2_559, __p3_559)); \ - __ret_559; \ +#define vmlal_high_laneq_u32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \ + uint64x2_t __ret_455; \ + uint64x2_t __s0_455 = __p0_455; \ + uint32x4_t __s1_455 = __p1_455; \ + uint32x4_t __s2_455 = __p2_455; \ + __ret_455 = __s0_455 + vmull_u32(vget_high_u32(__s1_455), splat_laneq_u32(__s2_455, __p3_455)); \ + __ret_455; \ }) #else -#define vmlal_high_laneq_u32(__p0_560, __p1_560, __p2_560, __p3_560) __extension__ ({ \ - uint64x2_t __ret_560; \ - uint64x2_t __s0_560 = __p0_560; \ - uint32x4_t __s1_560 = __p1_560; \ - 
uint32x4_t __s2_560 = __p2_560; \ - uint64x2_t __rev0_560; __rev0_560 = __builtin_shufflevector(__s0_560, __s0_560, 1, 0); \ - uint32x4_t __rev1_560; __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 3, 2, 1, 0); \ - uint32x4_t __rev2_560; __rev2_560 = __builtin_shufflevector(__s2_560, __s2_560, 3, 2, 1, 0); \ - __ret_560 = __rev0_560 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_560), __noswap_splat_laneq_u32(__rev2_560, __p3_560)); \ - __ret_560 = __builtin_shufflevector(__ret_560, __ret_560, 1, 0); \ - __ret_560; \ +#define vmlal_high_laneq_u32(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \ + uint64x2_t __ret_456; \ + uint64x2_t __s0_456 = __p0_456; \ + uint32x4_t __s1_456 = __p1_456; \ + uint32x4_t __s2_456 = __p2_456; \ + uint64x2_t __rev0_456; __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \ + uint32x4_t __rev1_456; __rev1_456 = __builtin_shufflevector(__s1_456, __s1_456, 3, 2, 1, 0); \ + uint32x4_t __rev2_456; __rev2_456 = __builtin_shufflevector(__s2_456, __s2_456, 3, 2, 1, 0); \ + __ret_456 = __rev0_456 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_456), __noswap_splat_laneq_u32(__rev2_456, __p3_456)); \ + __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 1, 0); \ + __ret_456; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_u16(__p0_561, __p1_561, __p2_561, __p3_561) __extension__ ({ \ - uint32x4_t __ret_561; \ - uint32x4_t __s0_561 = __p0_561; \ - uint16x8_t __s1_561 = __p1_561; \ - uint16x8_t __s2_561 = __p2_561; \ - __ret_561 = __s0_561 + vmull_u16(vget_high_u16(__s1_561), splat_laneq_u16(__s2_561, __p3_561)); \ - __ret_561; \ +#define vmlal_high_laneq_u16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \ + uint32x4_t __ret_457; \ + uint32x4_t __s0_457 = __p0_457; \ + uint16x8_t __s1_457 = __p1_457; \ + uint16x8_t __s2_457 = __p2_457; \ + __ret_457 = __s0_457 + vmull_u16(vget_high_u16(__s1_457), splat_laneq_u16(__s2_457, __p3_457)); \ + __ret_457; \ }) #else -#define vmlal_high_laneq_u16(__p0_562, __p1_562, __p2_562, __p3_562) __extension__ ({ \ - uint32x4_t __ret_562; \ - uint32x4_t __s0_562 = __p0_562; \ - uint16x8_t __s1_562 = __p1_562; \ - uint16x8_t __s2_562 = __p2_562; \ - uint32x4_t __rev0_562; __rev0_562 = __builtin_shufflevector(__s0_562, __s0_562, 3, 2, 1, 0); \ - uint16x8_t __rev1_562; __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_562; __rev2_562 = __builtin_shufflevector(__s2_562, __s2_562, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_562 = __rev0_562 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_562), __noswap_splat_laneq_u16(__rev2_562, __p3_562)); \ - __ret_562 = __builtin_shufflevector(__ret_562, __ret_562, 3, 2, 1, 0); \ - __ret_562; \ +#define vmlal_high_laneq_u16(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \ + uint32x4_t __ret_458; \ + uint32x4_t __s0_458 = __p0_458; \ + uint16x8_t __s1_458 = __p1_458; \ + uint16x8_t __s2_458 = __p2_458; \ + uint32x4_t __rev0_458; __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 3, 2, 1, 0); \ + uint16x8_t __rev1_458; __rev1_458 = __builtin_shufflevector(__s1_458, __s1_458, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_458; __rev2_458 = __builtin_shufflevector(__s2_458, __s2_458, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_458 = __rev0_458 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_458), __noswap_splat_laneq_u16(__rev2_458, __p3_458)); \ + __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \ + __ret_458; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define 
vmlal_high_laneq_s32(__p0_563, __p1_563, __p2_563, __p3_563) __extension__ ({ \ - int64x2_t __ret_563; \ - int64x2_t __s0_563 = __p0_563; \ - int32x4_t __s1_563 = __p1_563; \ - int32x4_t __s2_563 = __p2_563; \ - __ret_563 = __s0_563 + vmull_s32(vget_high_s32(__s1_563), splat_laneq_s32(__s2_563, __p3_563)); \ - __ret_563; \ +#define vmlal_high_laneq_s32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \ + int64x2_t __ret_459; \ + int64x2_t __s0_459 = __p0_459; \ + int32x4_t __s1_459 = __p1_459; \ + int32x4_t __s2_459 = __p2_459; \ + __ret_459 = __s0_459 + vmull_s32(vget_high_s32(__s1_459), splat_laneq_s32(__s2_459, __p3_459)); \ + __ret_459; \ }) #else -#define vmlal_high_laneq_s32(__p0_564, __p1_564, __p2_564, __p3_564) __extension__ ({ \ - int64x2_t __ret_564; \ - int64x2_t __s0_564 = __p0_564; \ - int32x4_t __s1_564 = __p1_564; \ - int32x4_t __s2_564 = __p2_564; \ - int64x2_t __rev0_564; __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 1, 0); \ - int32x4_t __rev1_564; __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \ - int32x4_t __rev2_564; __rev2_564 = __builtin_shufflevector(__s2_564, __s2_564, 3, 2, 1, 0); \ - __ret_564 = __rev0_564 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_564), __noswap_splat_laneq_s32(__rev2_564, __p3_564)); \ - __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 1, 0); \ - __ret_564; \ +#define vmlal_high_laneq_s32(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \ + int64x2_t __ret_460; \ + int64x2_t __s0_460 = __p0_460; \ + int32x4_t __s1_460 = __p1_460; \ + int32x4_t __s2_460 = __p2_460; \ + int64x2_t __rev0_460; __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \ + int32x4_t __rev1_460; __rev1_460 = __builtin_shufflevector(__s1_460, __s1_460, 3, 2, 1, 0); \ + int32x4_t __rev2_460; __rev2_460 = __builtin_shufflevector(__s2_460, __s2_460, 3, 2, 1, 0); \ + __ret_460 = __rev0_460 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_460), __noswap_splat_laneq_s32(__rev2_460, __p3_460)); \ + __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \ + __ret_460; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_s16(__p0_565, __p1_565, __p2_565, __p3_565) __extension__ ({ \ - int32x4_t __ret_565; \ - int32x4_t __s0_565 = __p0_565; \ - int16x8_t __s1_565 = __p1_565; \ - int16x8_t __s2_565 = __p2_565; \ - __ret_565 = __s0_565 + vmull_s16(vget_high_s16(__s1_565), splat_laneq_s16(__s2_565, __p3_565)); \ - __ret_565; \ +#define vmlal_high_laneq_s16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \ + int32x4_t __ret_461; \ + int32x4_t __s0_461 = __p0_461; \ + int16x8_t __s1_461 = __p1_461; \ + int16x8_t __s2_461 = __p2_461; \ + __ret_461 = __s0_461 + vmull_s16(vget_high_s16(__s1_461), splat_laneq_s16(__s2_461, __p3_461)); \ + __ret_461; \ }) #else -#define vmlal_high_laneq_s16(__p0_566, __p1_566, __p2_566, __p3_566) __extension__ ({ \ - int32x4_t __ret_566; \ - int32x4_t __s0_566 = __p0_566; \ - int16x8_t __s1_566 = __p1_566; \ - int16x8_t __s2_566 = __p2_566; \ - int32x4_t __rev0_566; __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 3, 2, 1, 0); \ - int16x8_t __rev1_566; __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_566; __rev2_566 = __builtin_shufflevector(__s2_566, __s2_566, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_566 = __rev0_566 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_566), __noswap_splat_laneq_s16(__rev2_566, __p3_566)); \ - __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 3, 2, 1, 0); \ 
- __ret_566; \ +#define vmlal_high_laneq_s16(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \ + int32x4_t __ret_462; \ + int32x4_t __s0_462 = __p0_462; \ + int16x8_t __s1_462 = __p1_462; \ + int16x8_t __s2_462 = __p2_462; \ + int32x4_t __rev0_462; __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \ + int16x8_t __rev1_462; __rev1_462 = __builtin_shufflevector(__s1_462, __s1_462, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_462; __rev2_462 = __builtin_shufflevector(__s2_462, __s2_462, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_462 = __rev0_462 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_462), __noswap_splat_laneq_s16(__rev2_462, __p3_462)); \ + __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \ + __ret_462; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_u32(__p0_567, __p1_567, __p2_567, __p3_567) __extension__ ({ \ - uint64x2_t __ret_567; \ - uint64x2_t __s0_567 = __p0_567; \ - uint32x2_t __s1_567 = __p1_567; \ - uint32x4_t __s2_567 = __p2_567; \ - __ret_567 = __s0_567 + vmull_u32(__s1_567, splat_laneq_u32(__s2_567, __p3_567)); \ - __ret_567; \ +#define vmlal_laneq_u32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \ + uint64x2_t __ret_463; \ + uint64x2_t __s0_463 = __p0_463; \ + uint32x2_t __s1_463 = __p1_463; \ + uint32x4_t __s2_463 = __p2_463; \ + __ret_463 = __s0_463 + vmull_u32(__s1_463, splat_laneq_u32(__s2_463, __p3_463)); \ + __ret_463; \ }) #else -#define vmlal_laneq_u32(__p0_568, __p1_568, __p2_568, __p3_568) __extension__ ({ \ - uint64x2_t __ret_568; \ - uint64x2_t __s0_568 = __p0_568; \ - uint32x2_t __s1_568 = __p1_568; \ - uint32x4_t __s2_568 = __p2_568; \ - uint64x2_t __rev0_568; __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 1, 0); \ - uint32x2_t __rev1_568; __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 1, 0); \ - uint32x4_t __rev2_568; __rev2_568 = __builtin_shufflevector(__s2_568, __s2_568, 3, 2, 1, 0); \ - __ret_568 = __rev0_568 + __noswap_vmull_u32(__rev1_568, __noswap_splat_laneq_u32(__rev2_568, __p3_568)); \ - __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 1, 0); \ - __ret_568; \ +#define vmlal_laneq_u32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \ + uint64x2_t __ret_464; \ + uint64x2_t __s0_464 = __p0_464; \ + uint32x2_t __s1_464 = __p1_464; \ + uint32x4_t __s2_464 = __p2_464; \ + uint64x2_t __rev0_464; __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 1, 0); \ + uint32x2_t __rev1_464; __rev1_464 = __builtin_shufflevector(__s1_464, __s1_464, 1, 0); \ + uint32x4_t __rev2_464; __rev2_464 = __builtin_shufflevector(__s2_464, __s2_464, 3, 2, 1, 0); \ + __ret_464 = __rev0_464 + __noswap_vmull_u32(__rev1_464, __noswap_splat_laneq_u32(__rev2_464, __p3_464)); \ + __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 1, 0); \ + __ret_464; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_u16(__p0_569, __p1_569, __p2_569, __p3_569) __extension__ ({ \ - uint32x4_t __ret_569; \ - uint32x4_t __s0_569 = __p0_569; \ - uint16x4_t __s1_569 = __p1_569; \ - uint16x8_t __s2_569 = __p2_569; \ - __ret_569 = __s0_569 + vmull_u16(__s1_569, splat_laneq_u16(__s2_569, __p3_569)); \ - __ret_569; \ +#define vmlal_laneq_u16(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \ + uint32x4_t __ret_465; \ + uint32x4_t __s0_465 = __p0_465; \ + uint16x4_t __s1_465 = __p1_465; \ + uint16x8_t __s2_465 = __p2_465; \ + __ret_465 = __s0_465 + vmull_u16(__s1_465, splat_laneq_u16(__s2_465, __p3_465)); \ + __ret_465; \ }) #else -#define vmlal_laneq_u16(__p0_570, __p1_570, 
__p2_570, __p3_570) __extension__ ({ \ - uint32x4_t __ret_570; \ - uint32x4_t __s0_570 = __p0_570; \ - uint16x4_t __s1_570 = __p1_570; \ - uint16x8_t __s2_570 = __p2_570; \ - uint32x4_t __rev0_570; __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 3, 2, 1, 0); \ - uint16x4_t __rev1_570; __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 3, 2, 1, 0); \ - uint16x8_t __rev2_570; __rev2_570 = __builtin_shufflevector(__s2_570, __s2_570, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_570 = __rev0_570 + __noswap_vmull_u16(__rev1_570, __noswap_splat_laneq_u16(__rev2_570, __p3_570)); \ - __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 3, 2, 1, 0); \ - __ret_570; \ +#define vmlal_laneq_u16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \ + uint32x4_t __ret_466; \ + uint32x4_t __s0_466 = __p0_466; \ + uint16x4_t __s1_466 = __p1_466; \ + uint16x8_t __s2_466 = __p2_466; \ + uint32x4_t __rev0_466; __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \ + uint16x4_t __rev1_466; __rev1_466 = __builtin_shufflevector(__s1_466, __s1_466, 3, 2, 1, 0); \ + uint16x8_t __rev2_466; __rev2_466 = __builtin_shufflevector(__s2_466, __s2_466, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_466 = __rev0_466 + __noswap_vmull_u16(__rev1_466, __noswap_splat_laneq_u16(__rev2_466, __p3_466)); \ + __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \ + __ret_466; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_s32(__p0_571, __p1_571, __p2_571, __p3_571) __extension__ ({ \ - int64x2_t __ret_571; \ - int64x2_t __s0_571 = __p0_571; \ - int32x2_t __s1_571 = __p1_571; \ - int32x4_t __s2_571 = __p2_571; \ - __ret_571 = __s0_571 + vmull_s32(__s1_571, splat_laneq_s32(__s2_571, __p3_571)); \ - __ret_571; \ +#define vmlal_laneq_s32(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \ + int64x2_t __ret_467; \ + int64x2_t __s0_467 = __p0_467; \ + int32x2_t __s1_467 = __p1_467; \ + int32x4_t __s2_467 = __p2_467; \ + __ret_467 = __s0_467 + vmull_s32(__s1_467, splat_laneq_s32(__s2_467, __p3_467)); \ + __ret_467; \ }) #else -#define vmlal_laneq_s32(__p0_572, __p1_572, __p2_572, __p3_572) __extension__ ({ \ - int64x2_t __ret_572; \ - int64x2_t __s0_572 = __p0_572; \ - int32x2_t __s1_572 = __p1_572; \ - int32x4_t __s2_572 = __p2_572; \ - int64x2_t __rev0_572; __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 1, 0); \ - int32x2_t __rev1_572; __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 1, 0); \ - int32x4_t __rev2_572; __rev2_572 = __builtin_shufflevector(__s2_572, __s2_572, 3, 2, 1, 0); \ - __ret_572 = __rev0_572 + __noswap_vmull_s32(__rev1_572, __noswap_splat_laneq_s32(__rev2_572, __p3_572)); \ - __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 1, 0); \ - __ret_572; \ +#define vmlal_laneq_s32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \ + int64x2_t __ret_468; \ + int64x2_t __s0_468 = __p0_468; \ + int32x2_t __s1_468 = __p1_468; \ + int32x4_t __s2_468 = __p2_468; \ + int64x2_t __rev0_468; __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \ + int32x2_t __rev1_468; __rev1_468 = __builtin_shufflevector(__s1_468, __s1_468, 1, 0); \ + int32x4_t __rev2_468; __rev2_468 = __builtin_shufflevector(__s2_468, __s2_468, 3, 2, 1, 0); \ + __ret_468 = __rev0_468 + __noswap_vmull_s32(__rev1_468, __noswap_splat_laneq_s32(__rev2_468, __p3_468)); \ + __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \ + __ret_468; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_s16(__p0_573, __p1_573, __p2_573, __p3_573) __extension__ ({ \ - 
int32x4_t __ret_573; \ - int32x4_t __s0_573 = __p0_573; \ - int16x4_t __s1_573 = __p1_573; \ - int16x8_t __s2_573 = __p2_573; \ - __ret_573 = __s0_573 + vmull_s16(__s1_573, splat_laneq_s16(__s2_573, __p3_573)); \ - __ret_573; \ +#define vmlal_laneq_s16(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \ + int32x4_t __ret_469; \ + int32x4_t __s0_469 = __p0_469; \ + int16x4_t __s1_469 = __p1_469; \ + int16x8_t __s2_469 = __p2_469; \ + __ret_469 = __s0_469 + vmull_s16(__s1_469, splat_laneq_s16(__s2_469, __p3_469)); \ + __ret_469; \ }) #else -#define vmlal_laneq_s16(__p0_574, __p1_574, __p2_574, __p3_574) __extension__ ({ \ - int32x4_t __ret_574; \ - int32x4_t __s0_574 = __p0_574; \ - int16x4_t __s1_574 = __p1_574; \ - int16x8_t __s2_574 = __p2_574; \ - int32x4_t __rev0_574; __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 3, 2, 1, 0); \ - int16x4_t __rev1_574; __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 3, 2, 1, 0); \ - int16x8_t __rev2_574; __rev2_574 = __builtin_shufflevector(__s2_574, __s2_574, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_574 = __rev0_574 + __noswap_vmull_s16(__rev1_574, __noswap_splat_laneq_s16(__rev2_574, __p3_574)); \ - __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 3, 2, 1, 0); \ - __ret_574; \ +#define vmlal_laneq_s16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \ + int32x4_t __ret_470; \ + int32x4_t __s0_470 = __p0_470; \ + int16x4_t __s1_470 = __p1_470; \ + int16x8_t __s2_470 = __p2_470; \ + int32x4_t __rev0_470; __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 3, 2, 1, 0); \ + int16x4_t __rev1_470; __rev1_470 = __builtin_shufflevector(__s1_470, __s1_470, 3, 2, 1, 0); \ + int16x8_t __rev2_470; __rev2_470 = __builtin_shufflevector(__s2_470, __s2_470, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_470 = __rev0_470 + __noswap_vmull_s16(__rev1_470, __noswap_splat_laneq_s16(__rev2_470, __p3_470)); \ + __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 3, 2, 1, 0); \ + __ret_470; \ }) #endif @@ -55903,530 +49996,530 @@ __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) return __ret; } #ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_u32(__p0_575, __p1_575, __p2_575, __p3_575) __extension__ ({ \ - uint32x4_t __ret_575; \ - uint32x4_t __s0_575 = __p0_575; \ - uint32x4_t __s1_575 = __p1_575; \ - uint32x4_t __s2_575 = __p2_575; \ - __ret_575 = __s0_575 - __s1_575 * splatq_laneq_u32(__s2_575, __p3_575); \ - __ret_575; \ +#define vmlsq_laneq_u32(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \ + uint32x4_t __ret_471; \ + uint32x4_t __s0_471 = __p0_471; \ + uint32x4_t __s1_471 = __p1_471; \ + uint32x4_t __s2_471 = __p2_471; \ + __ret_471 = __s0_471 - __s1_471 * splatq_laneq_u32(__s2_471, __p3_471); \ + __ret_471; \ }) #else -#define vmlsq_laneq_u32(__p0_576, __p1_576, __p2_576, __p3_576) __extension__ ({ \ - uint32x4_t __ret_576; \ - uint32x4_t __s0_576 = __p0_576; \ - uint32x4_t __s1_576 = __p1_576; \ - uint32x4_t __s2_576 = __p2_576; \ - uint32x4_t __rev0_576; __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 3, 2, 1, 0); \ - uint32x4_t __rev1_576; __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 3, 2, 1, 0); \ - uint32x4_t __rev2_576; __rev2_576 = __builtin_shufflevector(__s2_576, __s2_576, 3, 2, 1, 0); \ - __ret_576 = __rev0_576 - __rev1_576 * __noswap_splatq_laneq_u32(__rev2_576, __p3_576); \ - __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 3, 2, 1, 0); \ - __ret_576; \ +#define vmlsq_laneq_u32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \ + uint32x4_t 
__ret_472; \ + uint32x4_t __s0_472 = __p0_472; \ + uint32x4_t __s1_472 = __p1_472; \ + uint32x4_t __s2_472 = __p2_472; \ + uint32x4_t __rev0_472; __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 3, 2, 1, 0); \ + uint32x4_t __rev1_472; __rev1_472 = __builtin_shufflevector(__s1_472, __s1_472, 3, 2, 1, 0); \ + uint32x4_t __rev2_472; __rev2_472 = __builtin_shufflevector(__s2_472, __s2_472, 3, 2, 1, 0); \ + __ret_472 = __rev0_472 - __rev1_472 * __noswap_splatq_laneq_u32(__rev2_472, __p3_472); \ + __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 3, 2, 1, 0); \ + __ret_472; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_u16(__p0_577, __p1_577, __p2_577, __p3_577) __extension__ ({ \ - uint16x8_t __ret_577; \ - uint16x8_t __s0_577 = __p0_577; \ - uint16x8_t __s1_577 = __p1_577; \ - uint16x8_t __s2_577 = __p2_577; \ - __ret_577 = __s0_577 - __s1_577 * splatq_laneq_u16(__s2_577, __p3_577); \ - __ret_577; \ +#define vmlsq_laneq_u16(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \ + uint16x8_t __ret_473; \ + uint16x8_t __s0_473 = __p0_473; \ + uint16x8_t __s1_473 = __p1_473; \ + uint16x8_t __s2_473 = __p2_473; \ + __ret_473 = __s0_473 - __s1_473 * splatq_laneq_u16(__s2_473, __p3_473); \ + __ret_473; \ }) #else -#define vmlsq_laneq_u16(__p0_578, __p1_578, __p2_578, __p3_578) __extension__ ({ \ - uint16x8_t __ret_578; \ - uint16x8_t __s0_578 = __p0_578; \ - uint16x8_t __s1_578 = __p1_578; \ - uint16x8_t __s2_578 = __p2_578; \ - uint16x8_t __rev0_578; __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_578; __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_578; __rev2_578 = __builtin_shufflevector(__s2_578, __s2_578, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_578 = __rev0_578 - __rev1_578 * __noswap_splatq_laneq_u16(__rev2_578, __p3_578); \ - __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_578; \ +#define vmlsq_laneq_u16(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \ + uint16x8_t __ret_474; \ + uint16x8_t __s0_474 = __p0_474; \ + uint16x8_t __s1_474 = __p1_474; \ + uint16x8_t __s2_474 = __p2_474; \ + uint16x8_t __rev0_474; __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_474; __rev1_474 = __builtin_shufflevector(__s1_474, __s1_474, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_474; __rev2_474 = __builtin_shufflevector(__s2_474, __s2_474, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_474 = __rev0_474 - __rev1_474 * __noswap_splatq_laneq_u16(__rev2_474, __p3_474); \ + __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_474; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_f32(__p0_579, __p1_579, __p2_579, __p3_579) __extension__ ({ \ - float32x4_t __ret_579; \ - float32x4_t __s0_579 = __p0_579; \ - float32x4_t __s1_579 = __p1_579; \ - float32x4_t __s2_579 = __p2_579; \ - __ret_579 = __s0_579 - __s1_579 * splatq_laneq_f32(__s2_579, __p3_579); \ - __ret_579; \ +#define vmlsq_laneq_f32(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \ + float32x4_t __ret_475; \ + float32x4_t __s0_475 = __p0_475; \ + float32x4_t __s1_475 = __p1_475; \ + float32x4_t __s2_475 = __p2_475; \ + __ret_475 = __s0_475 - __s1_475 * splatq_laneq_f32(__s2_475, __p3_475); \ + __ret_475; \ }) #else -#define vmlsq_laneq_f32(__p0_580, __p1_580, __p2_580, __p3_580) __extension__ ({ \ - float32x4_t __ret_580; \ - float32x4_t __s0_580 = __p0_580; \ - 
float32x4_t __s1_580 = __p1_580; \ - float32x4_t __s2_580 = __p2_580; \ - float32x4_t __rev0_580; __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 3, 2, 1, 0); \ - float32x4_t __rev1_580; __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \ - float32x4_t __rev2_580; __rev2_580 = __builtin_shufflevector(__s2_580, __s2_580, 3, 2, 1, 0); \ - __ret_580 = __rev0_580 - __rev1_580 * __noswap_splatq_laneq_f32(__rev2_580, __p3_580); \ - __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 3, 2, 1, 0); \ - __ret_580; \ +#define vmlsq_laneq_f32(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \ + float32x4_t __ret_476; \ + float32x4_t __s0_476 = __p0_476; \ + float32x4_t __s1_476 = __p1_476; \ + float32x4_t __s2_476 = __p2_476; \ + float32x4_t __rev0_476; __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 3, 2, 1, 0); \ + float32x4_t __rev1_476; __rev1_476 = __builtin_shufflevector(__s1_476, __s1_476, 3, 2, 1, 0); \ + float32x4_t __rev2_476; __rev2_476 = __builtin_shufflevector(__s2_476, __s2_476, 3, 2, 1, 0); \ + __ret_476 = __rev0_476 - __rev1_476 * __noswap_splatq_laneq_f32(__rev2_476, __p3_476); \ + __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 3, 2, 1, 0); \ + __ret_476; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_s32(__p0_581, __p1_581, __p2_581, __p3_581) __extension__ ({ \ - int32x4_t __ret_581; \ - int32x4_t __s0_581 = __p0_581; \ - int32x4_t __s1_581 = __p1_581; \ - int32x4_t __s2_581 = __p2_581; \ - __ret_581 = __s0_581 - __s1_581 * splatq_laneq_s32(__s2_581, __p3_581); \ - __ret_581; \ +#define vmlsq_laneq_s32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \ + int32x4_t __ret_477; \ + int32x4_t __s0_477 = __p0_477; \ + int32x4_t __s1_477 = __p1_477; \ + int32x4_t __s2_477 = __p2_477; \ + __ret_477 = __s0_477 - __s1_477 * splatq_laneq_s32(__s2_477, __p3_477); \ + __ret_477; \ }) #else -#define vmlsq_laneq_s32(__p0_582, __p1_582, __p2_582, __p3_582) __extension__ ({ \ - int32x4_t __ret_582; \ - int32x4_t __s0_582 = __p0_582; \ - int32x4_t __s1_582 = __p1_582; \ - int32x4_t __s2_582 = __p2_582; \ - int32x4_t __rev0_582; __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 3, 2, 1, 0); \ - int32x4_t __rev1_582; __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 3, 2, 1, 0); \ - int32x4_t __rev2_582; __rev2_582 = __builtin_shufflevector(__s2_582, __s2_582, 3, 2, 1, 0); \ - __ret_582 = __rev0_582 - __rev1_582 * __noswap_splatq_laneq_s32(__rev2_582, __p3_582); \ - __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 3, 2, 1, 0); \ - __ret_582; \ +#define vmlsq_laneq_s32(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \ + int32x4_t __ret_478; \ + int32x4_t __s0_478 = __p0_478; \ + int32x4_t __s1_478 = __p1_478; \ + int32x4_t __s2_478 = __p2_478; \ + int32x4_t __rev0_478; __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \ + int32x4_t __rev1_478; __rev1_478 = __builtin_shufflevector(__s1_478, __s1_478, 3, 2, 1, 0); \ + int32x4_t __rev2_478; __rev2_478 = __builtin_shufflevector(__s2_478, __s2_478, 3, 2, 1, 0); \ + __ret_478 = __rev0_478 - __rev1_478 * __noswap_splatq_laneq_s32(__rev2_478, __p3_478); \ + __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \ + __ret_478; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_s16(__p0_583, __p1_583, __p2_583, __p3_583) __extension__ ({ \ - int16x8_t __ret_583; \ - int16x8_t __s0_583 = __p0_583; \ - int16x8_t __s1_583 = __p1_583; \ - int16x8_t __s2_583 = __p2_583; \ - __ret_583 = __s0_583 - __s1_583 * 
splatq_laneq_s16(__s2_583, __p3_583); \ - __ret_583; \ +#define vmlsq_laneq_s16(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \ + int16x8_t __ret_479; \ + int16x8_t __s0_479 = __p0_479; \ + int16x8_t __s1_479 = __p1_479; \ + int16x8_t __s2_479 = __p2_479; \ + __ret_479 = __s0_479 - __s1_479 * splatq_laneq_s16(__s2_479, __p3_479); \ + __ret_479; \ }) #else -#define vmlsq_laneq_s16(__p0_584, __p1_584, __p2_584, __p3_584) __extension__ ({ \ - int16x8_t __ret_584; \ - int16x8_t __s0_584 = __p0_584; \ - int16x8_t __s1_584 = __p1_584; \ - int16x8_t __s2_584 = __p2_584; \ - int16x8_t __rev0_584; __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_584; __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_584; __rev2_584 = __builtin_shufflevector(__s2_584, __s2_584, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_584 = __rev0_584 - __rev1_584 * __noswap_splatq_laneq_s16(__rev2_584, __p3_584); \ - __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_584; \ +#define vmlsq_laneq_s16(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \ + int16x8_t __ret_480; \ + int16x8_t __s0_480 = __p0_480; \ + int16x8_t __s1_480 = __p1_480; \ + int16x8_t __s2_480 = __p2_480; \ + int16x8_t __rev0_480; __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_480; __rev1_480 = __builtin_shufflevector(__s1_480, __s1_480, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_480; __rev2_480 = __builtin_shufflevector(__s2_480, __s2_480, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_480 = __rev0_480 - __rev1_480 * __noswap_splatq_laneq_s16(__rev2_480, __p3_480); \ + __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_480; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_u32(__p0_585, __p1_585, __p2_585, __p3_585) __extension__ ({ \ - uint32x2_t __ret_585; \ - uint32x2_t __s0_585 = __p0_585; \ - uint32x2_t __s1_585 = __p1_585; \ - uint32x4_t __s2_585 = __p2_585; \ - __ret_585 = __s0_585 - __s1_585 * splat_laneq_u32(__s2_585, __p3_585); \ - __ret_585; \ +#define vmls_laneq_u32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \ + uint32x2_t __ret_481; \ + uint32x2_t __s0_481 = __p0_481; \ + uint32x2_t __s1_481 = __p1_481; \ + uint32x4_t __s2_481 = __p2_481; \ + __ret_481 = __s0_481 - __s1_481 * splat_laneq_u32(__s2_481, __p3_481); \ + __ret_481; \ }) #else -#define vmls_laneq_u32(__p0_586, __p1_586, __p2_586, __p3_586) __extension__ ({ \ - uint32x2_t __ret_586; \ - uint32x2_t __s0_586 = __p0_586; \ - uint32x2_t __s1_586 = __p1_586; \ - uint32x4_t __s2_586 = __p2_586; \ - uint32x2_t __rev0_586; __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 1, 0); \ - uint32x2_t __rev1_586; __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \ - uint32x4_t __rev2_586; __rev2_586 = __builtin_shufflevector(__s2_586, __s2_586, 3, 2, 1, 0); \ - __ret_586 = __rev0_586 - __rev1_586 * __noswap_splat_laneq_u32(__rev2_586, __p3_586); \ - __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 1, 0); \ - __ret_586; \ +#define vmls_laneq_u32(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \ + uint32x2_t __ret_482; \ + uint32x2_t __s0_482 = __p0_482; \ + uint32x2_t __s1_482 = __p1_482; \ + uint32x4_t __s2_482 = __p2_482; \ + uint32x2_t __rev0_482; __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \ + uint32x2_t __rev1_482; __rev1_482 = __builtin_shufflevector(__s1_482, __s1_482, 1, 
0); \ + uint32x4_t __rev2_482; __rev2_482 = __builtin_shufflevector(__s2_482, __s2_482, 3, 2, 1, 0); \ + __ret_482 = __rev0_482 - __rev1_482 * __noswap_splat_laneq_u32(__rev2_482, __p3_482); \ + __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 1, 0); \ + __ret_482; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_u16(__p0_587, __p1_587, __p2_587, __p3_587) __extension__ ({ \ - uint16x4_t __ret_587; \ - uint16x4_t __s0_587 = __p0_587; \ - uint16x4_t __s1_587 = __p1_587; \ - uint16x8_t __s2_587 = __p2_587; \ - __ret_587 = __s0_587 - __s1_587 * splat_laneq_u16(__s2_587, __p3_587); \ - __ret_587; \ +#define vmls_laneq_u16(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \ + uint16x4_t __ret_483; \ + uint16x4_t __s0_483 = __p0_483; \ + uint16x4_t __s1_483 = __p1_483; \ + uint16x8_t __s2_483 = __p2_483; \ + __ret_483 = __s0_483 - __s1_483 * splat_laneq_u16(__s2_483, __p3_483); \ + __ret_483; \ }) #else -#define vmls_laneq_u16(__p0_588, __p1_588, __p2_588, __p3_588) __extension__ ({ \ - uint16x4_t __ret_588; \ - uint16x4_t __s0_588 = __p0_588; \ - uint16x4_t __s1_588 = __p1_588; \ - uint16x8_t __s2_588 = __p2_588; \ - uint16x4_t __rev0_588; __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 3, 2, 1, 0); \ - uint16x4_t __rev1_588; __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \ - uint16x8_t __rev2_588; __rev2_588 = __builtin_shufflevector(__s2_588, __s2_588, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_588 = __rev0_588 - __rev1_588 * __noswap_splat_laneq_u16(__rev2_588, __p3_588); \ - __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 3, 2, 1, 0); \ - __ret_588; \ +#define vmls_laneq_u16(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \ + uint16x4_t __ret_484; \ + uint16x4_t __s0_484 = __p0_484; \ + uint16x4_t __s1_484 = __p1_484; \ + uint16x8_t __s2_484 = __p2_484; \ + uint16x4_t __rev0_484; __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \ + uint16x4_t __rev1_484; __rev1_484 = __builtin_shufflevector(__s1_484, __s1_484, 3, 2, 1, 0); \ + uint16x8_t __rev2_484; __rev2_484 = __builtin_shufflevector(__s2_484, __s2_484, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_484 = __rev0_484 - __rev1_484 * __noswap_splat_laneq_u16(__rev2_484, __p3_484); \ + __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 3, 2, 1, 0); \ + __ret_484; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_f32(__p0_589, __p1_589, __p2_589, __p3_589) __extension__ ({ \ - float32x2_t __ret_589; \ - float32x2_t __s0_589 = __p0_589; \ - float32x2_t __s1_589 = __p1_589; \ - float32x4_t __s2_589 = __p2_589; \ - __ret_589 = __s0_589 - __s1_589 * splat_laneq_f32(__s2_589, __p3_589); \ - __ret_589; \ +#define vmls_laneq_f32(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \ + float32x2_t __ret_485; \ + float32x2_t __s0_485 = __p0_485; \ + float32x2_t __s1_485 = __p1_485; \ + float32x4_t __s2_485 = __p2_485; \ + __ret_485 = __s0_485 - __s1_485 * splat_laneq_f32(__s2_485, __p3_485); \ + __ret_485; \ }) #else -#define vmls_laneq_f32(__p0_590, __p1_590, __p2_590, __p3_590) __extension__ ({ \ - float32x2_t __ret_590; \ - float32x2_t __s0_590 = __p0_590; \ - float32x2_t __s1_590 = __p1_590; \ - float32x4_t __s2_590 = __p2_590; \ - float32x2_t __rev0_590; __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 1, 0); \ - float32x2_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \ - float32x4_t __rev2_590; __rev2_590 = __builtin_shufflevector(__s2_590, __s2_590, 3, 2, 1, 0); \ - __ret_590 = __rev0_590 - __rev1_590 * 
__noswap_splat_laneq_f32(__rev2_590, __p3_590); \ - __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 1, 0); \ - __ret_590; \ +#define vmls_laneq_f32(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \ + float32x2_t __ret_486; \ + float32x2_t __s0_486 = __p0_486; \ + float32x2_t __s1_486 = __p1_486; \ + float32x4_t __s2_486 = __p2_486; \ + float32x2_t __rev0_486; __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 1, 0); \ + float32x2_t __rev1_486; __rev1_486 = __builtin_shufflevector(__s1_486, __s1_486, 1, 0); \ + float32x4_t __rev2_486; __rev2_486 = __builtin_shufflevector(__s2_486, __s2_486, 3, 2, 1, 0); \ + __ret_486 = __rev0_486 - __rev1_486 * __noswap_splat_laneq_f32(__rev2_486, __p3_486); \ + __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 1, 0); \ + __ret_486; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_s32(__p0_591, __p1_591, __p2_591, __p3_591) __extension__ ({ \ - int32x2_t __ret_591; \ - int32x2_t __s0_591 = __p0_591; \ - int32x2_t __s1_591 = __p1_591; \ - int32x4_t __s2_591 = __p2_591; \ - __ret_591 = __s0_591 - __s1_591 * splat_laneq_s32(__s2_591, __p3_591); \ - __ret_591; \ +#define vmls_laneq_s32(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \ + int32x2_t __ret_487; \ + int32x2_t __s0_487 = __p0_487; \ + int32x2_t __s1_487 = __p1_487; \ + int32x4_t __s2_487 = __p2_487; \ + __ret_487 = __s0_487 - __s1_487 * splat_laneq_s32(__s2_487, __p3_487); \ + __ret_487; \ }) #else -#define vmls_laneq_s32(__p0_592, __p1_592, __p2_592, __p3_592) __extension__ ({ \ - int32x2_t __ret_592; \ - int32x2_t __s0_592 = __p0_592; \ - int32x2_t __s1_592 = __p1_592; \ - int32x4_t __s2_592 = __p2_592; \ - int32x2_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \ - int32x2_t __rev1_592; __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 1, 0); \ - int32x4_t __rev2_592; __rev2_592 = __builtin_shufflevector(__s2_592, __s2_592, 3, 2, 1, 0); \ - __ret_592 = __rev0_592 - __rev1_592 * __noswap_splat_laneq_s32(__rev2_592, __p3_592); \ - __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \ - __ret_592; \ +#define vmls_laneq_s32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \ + int32x2_t __ret_488; \ + int32x2_t __s0_488 = __p0_488; \ + int32x2_t __s1_488 = __p1_488; \ + int32x4_t __s2_488 = __p2_488; \ + int32x2_t __rev0_488; __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 1, 0); \ + int32x2_t __rev1_488; __rev1_488 = __builtin_shufflevector(__s1_488, __s1_488, 1, 0); \ + int32x4_t __rev2_488; __rev2_488 = __builtin_shufflevector(__s2_488, __s2_488, 3, 2, 1, 0); \ + __ret_488 = __rev0_488 - __rev1_488 * __noswap_splat_laneq_s32(__rev2_488, __p3_488); \ + __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \ + __ret_488; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_s16(__p0_593, __p1_593, __p2_593, __p3_593) __extension__ ({ \ - int16x4_t __ret_593; \ - int16x4_t __s0_593 = __p0_593; \ - int16x4_t __s1_593 = __p1_593; \ - int16x8_t __s2_593 = __p2_593; \ - __ret_593 = __s0_593 - __s1_593 * splat_laneq_s16(__s2_593, __p3_593); \ - __ret_593; \ +#define vmls_laneq_s16(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \ + int16x4_t __ret_489; \ + int16x4_t __s0_489 = __p0_489; \ + int16x4_t __s1_489 = __p1_489; \ + int16x8_t __s2_489 = __p2_489; \ + __ret_489 = __s0_489 - __s1_489 * splat_laneq_s16(__s2_489, __p3_489); \ + __ret_489; \ }) #else -#define vmls_laneq_s16(__p0_594, __p1_594, __p2_594, __p3_594) __extension__ ({ \ - int16x4_t __ret_594; \ - 
int16x4_t __s0_594 = __p0_594; \ - int16x4_t __s1_594 = __p1_594; \ - int16x8_t __s2_594 = __p2_594; \ - int16x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ - int16x4_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 3, 2, 1, 0); \ - int16x8_t __rev2_594; __rev2_594 = __builtin_shufflevector(__s2_594, __s2_594, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_594 = __rev0_594 - __rev1_594 * __noswap_splat_laneq_s16(__rev2_594, __p3_594); \ - __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \ - __ret_594; \ +#define vmls_laneq_s16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \ + int16x4_t __ret_490; \ + int16x4_t __s0_490 = __p0_490; \ + int16x4_t __s1_490 = __p1_490; \ + int16x8_t __s2_490 = __p2_490; \ + int16x4_t __rev0_490; __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \ + int16x4_t __rev1_490; __rev1_490 = __builtin_shufflevector(__s1_490, __s1_490, 3, 2, 1, 0); \ + int16x8_t __rev2_490; __rev2_490 = __builtin_shufflevector(__s2_490, __s2_490, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_490 = __rev0_490 - __rev1_490 * __noswap_splat_laneq_s16(__rev2_490, __p3_490); \ + __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 3, 2, 1, 0); \ + __ret_490; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_u32(__p0_595, __p1_595, __p2_595, __p3_595) __extension__ ({ \ - uint64x2_t __ret_595; \ - uint64x2_t __s0_595 = __p0_595; \ - uint32x4_t __s1_595 = __p1_595; \ - uint32x2_t __s2_595 = __p2_595; \ - __ret_595 = __s0_595 - vmull_u32(vget_high_u32(__s1_595), splat_lane_u32(__s2_595, __p3_595)); \ - __ret_595; \ +#define vmlsl_high_lane_u32(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \ + uint64x2_t __ret_491; \ + uint64x2_t __s0_491 = __p0_491; \ + uint32x4_t __s1_491 = __p1_491; \ + uint32x2_t __s2_491 = __p2_491; \ + __ret_491 = __s0_491 - vmull_u32(vget_high_u32(__s1_491), splat_lane_u32(__s2_491, __p3_491)); \ + __ret_491; \ }) #else -#define vmlsl_high_lane_u32(__p0_596, __p1_596, __p2_596, __p3_596) __extension__ ({ \ - uint64x2_t __ret_596; \ - uint64x2_t __s0_596 = __p0_596; \ - uint32x4_t __s1_596 = __p1_596; \ - uint32x2_t __s2_596 = __p2_596; \ - uint64x2_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \ - uint32x4_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 3, 2, 1, 0); \ - uint32x2_t __rev2_596; __rev2_596 = __builtin_shufflevector(__s2_596, __s2_596, 1, 0); \ - __ret_596 = __rev0_596 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_596), __noswap_splat_lane_u32(__rev2_596, __p3_596)); \ - __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \ - __ret_596; \ +#define vmlsl_high_lane_u32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \ + uint64x2_t __ret_492; \ + uint64x2_t __s0_492 = __p0_492; \ + uint32x4_t __s1_492 = __p1_492; \ + uint32x2_t __s2_492 = __p2_492; \ + uint64x2_t __rev0_492; __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \ + uint32x4_t __rev1_492; __rev1_492 = __builtin_shufflevector(__s1_492, __s1_492, 3, 2, 1, 0); \ + uint32x2_t __rev2_492; __rev2_492 = __builtin_shufflevector(__s2_492, __s2_492, 1, 0); \ + __ret_492 = __rev0_492 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_492), __noswap_splat_lane_u32(__rev2_492, __p3_492)); \ + __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 1, 0); \ + __ret_492; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_u16(__p0_597, __p1_597, __p2_597, __p3_597) __extension__ ({ \ - 
uint32x4_t __ret_597; \ - uint32x4_t __s0_597 = __p0_597; \ - uint16x8_t __s1_597 = __p1_597; \ - uint16x4_t __s2_597 = __p2_597; \ - __ret_597 = __s0_597 - vmull_u16(vget_high_u16(__s1_597), splat_lane_u16(__s2_597, __p3_597)); \ - __ret_597; \ +#define vmlsl_high_lane_u16(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \ + uint32x4_t __ret_493; \ + uint32x4_t __s0_493 = __p0_493; \ + uint16x8_t __s1_493 = __p1_493; \ + uint16x4_t __s2_493 = __p2_493; \ + __ret_493 = __s0_493 - vmull_u16(vget_high_u16(__s1_493), splat_lane_u16(__s2_493, __p3_493)); \ + __ret_493; \ }) #else -#define vmlsl_high_lane_u16(__p0_598, __p1_598, __p2_598, __p3_598) __extension__ ({ \ - uint32x4_t __ret_598; \ - uint32x4_t __s0_598 = __p0_598; \ - uint16x8_t __s1_598 = __p1_598; \ - uint16x4_t __s2_598 = __p2_598; \ - uint32x4_t __rev0_598; __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \ - uint16x8_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_598; __rev2_598 = __builtin_shufflevector(__s2_598, __s2_598, 3, 2, 1, 0); \ - __ret_598 = __rev0_598 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_598), __noswap_splat_lane_u16(__rev2_598, __p3_598)); \ - __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 3, 2, 1, 0); \ - __ret_598; \ +#define vmlsl_high_lane_u16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \ + uint32x4_t __ret_494; \ + uint32x4_t __s0_494 = __p0_494; \ + uint16x8_t __s1_494 = __p1_494; \ + uint16x4_t __s2_494 = __p2_494; \ + uint32x4_t __rev0_494; __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 3, 2, 1, 0); \ + uint16x8_t __rev1_494; __rev1_494 = __builtin_shufflevector(__s1_494, __s1_494, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_494; __rev2_494 = __builtin_shufflevector(__s2_494, __s2_494, 3, 2, 1, 0); \ + __ret_494 = __rev0_494 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_494), __noswap_splat_lane_u16(__rev2_494, __p3_494)); \ + __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \ + __ret_494; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_s32(__p0_599, __p1_599, __p2_599, __p3_599) __extension__ ({ \ - int64x2_t __ret_599; \ - int64x2_t __s0_599 = __p0_599; \ - int32x4_t __s1_599 = __p1_599; \ - int32x2_t __s2_599 = __p2_599; \ - __ret_599 = __s0_599 - vmull_s32(vget_high_s32(__s1_599), splat_lane_s32(__s2_599, __p3_599)); \ - __ret_599; \ +#define vmlsl_high_lane_s32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \ + int64x2_t __ret_495; \ + int64x2_t __s0_495 = __p0_495; \ + int32x4_t __s1_495 = __p1_495; \ + int32x2_t __s2_495 = __p2_495; \ + __ret_495 = __s0_495 - vmull_s32(vget_high_s32(__s1_495), splat_lane_s32(__s2_495, __p3_495)); \ + __ret_495; \ }) #else -#define vmlsl_high_lane_s32(__p0_600, __p1_600, __p2_600, __p3_600) __extension__ ({ \ - int64x2_t __ret_600; \ - int64x2_t __s0_600 = __p0_600; \ - int32x4_t __s1_600 = __p1_600; \ - int32x2_t __s2_600 = __p2_600; \ - int64x2_t __rev0_600; __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 1, 0); \ - int32x4_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \ - int32x2_t __rev2_600; __rev2_600 = __builtin_shufflevector(__s2_600, __s2_600, 1, 0); \ - __ret_600 = __rev0_600 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_600), __noswap_splat_lane_s32(__rev2_600, __p3_600)); \ - __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 1, 0); \ - __ret_600; \ +#define vmlsl_high_lane_s32(__p0_496, __p1_496, 
__p2_496, __p3_496) __extension__ ({ \ + int64x2_t __ret_496; \ + int64x2_t __s0_496 = __p0_496; \ + int32x4_t __s1_496 = __p1_496; \ + int32x2_t __s2_496 = __p2_496; \ + int64x2_t __rev0_496; __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 1, 0); \ + int32x4_t __rev1_496; __rev1_496 = __builtin_shufflevector(__s1_496, __s1_496, 3, 2, 1, 0); \ + int32x2_t __rev2_496; __rev2_496 = __builtin_shufflevector(__s2_496, __s2_496, 1, 0); \ + __ret_496 = __rev0_496 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_496), __noswap_splat_lane_s32(__rev2_496, __p3_496)); \ + __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 1, 0); \ + __ret_496; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_s16(__p0_601, __p1_601, __p2_601, __p3_601) __extension__ ({ \ - int32x4_t __ret_601; \ - int32x4_t __s0_601 = __p0_601; \ - int16x8_t __s1_601 = __p1_601; \ - int16x4_t __s2_601 = __p2_601; \ - __ret_601 = __s0_601 - vmull_s16(vget_high_s16(__s1_601), splat_lane_s16(__s2_601, __p3_601)); \ - __ret_601; \ +#define vmlsl_high_lane_s16(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \ + int32x4_t __ret_497; \ + int32x4_t __s0_497 = __p0_497; \ + int16x8_t __s1_497 = __p1_497; \ + int16x4_t __s2_497 = __p2_497; \ + __ret_497 = __s0_497 - vmull_s16(vget_high_s16(__s1_497), splat_lane_s16(__s2_497, __p3_497)); \ + __ret_497; \ }) #else -#define vmlsl_high_lane_s16(__p0_602, __p1_602, __p2_602, __p3_602) __extension__ ({ \ - int32x4_t __ret_602; \ - int32x4_t __s0_602 = __p0_602; \ - int16x8_t __s1_602 = __p1_602; \ - int16x4_t __s2_602 = __p2_602; \ - int32x4_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 3, 2, 1, 0); \ - int16x8_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_602; __rev2_602 = __builtin_shufflevector(__s2_602, __s2_602, 3, 2, 1, 0); \ - __ret_602 = __rev0_602 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_602), __noswap_splat_lane_s16(__rev2_602, __p3_602)); \ - __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 3, 2, 1, 0); \ - __ret_602; \ +#define vmlsl_high_lane_s16(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \ + int32x4_t __ret_498; \ + int32x4_t __s0_498 = __p0_498; \ + int16x8_t __s1_498 = __p1_498; \ + int16x4_t __s2_498 = __p2_498; \ + int32x4_t __rev0_498; __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 3, 2, 1, 0); \ + int16x8_t __rev1_498; __rev1_498 = __builtin_shufflevector(__s1_498, __s1_498, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_498; __rev2_498 = __builtin_shufflevector(__s2_498, __s2_498, 3, 2, 1, 0); \ + __ret_498 = __rev0_498 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_498), __noswap_splat_lane_s16(__rev2_498, __p3_498)); \ + __ret_498 = __builtin_shufflevector(__ret_498, __ret_498, 3, 2, 1, 0); \ + __ret_498; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_u32(__p0_603, __p1_603, __p2_603, __p3_603) __extension__ ({ \ - uint64x2_t __ret_603; \ - uint64x2_t __s0_603 = __p0_603; \ - uint32x4_t __s1_603 = __p1_603; \ - uint32x4_t __s2_603 = __p2_603; \ - __ret_603 = __s0_603 - vmull_u32(vget_high_u32(__s1_603), splat_laneq_u32(__s2_603, __p3_603)); \ - __ret_603; \ +#define vmlsl_high_laneq_u32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \ + uint64x2_t __ret_499; \ + uint64x2_t __s0_499 = __p0_499; \ + uint32x4_t __s1_499 = __p1_499; \ + uint32x4_t __s2_499 = __p2_499; \ + __ret_499 = __s0_499 - vmull_u32(vget_high_u32(__s1_499), splat_laneq_u32(__s2_499, __p3_499)); \ + 
__ret_499; \ }) #else -#define vmlsl_high_laneq_u32(__p0_604, __p1_604, __p2_604, __p3_604) __extension__ ({ \ - uint64x2_t __ret_604; \ - uint64x2_t __s0_604 = __p0_604; \ - uint32x4_t __s1_604 = __p1_604; \ - uint32x4_t __s2_604 = __p2_604; \ - uint64x2_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 1, 0); \ - uint32x4_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \ - uint32x4_t __rev2_604; __rev2_604 = __builtin_shufflevector(__s2_604, __s2_604, 3, 2, 1, 0); \ - __ret_604 = __rev0_604 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_604), __noswap_splat_laneq_u32(__rev2_604, __p3_604)); \ - __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 1, 0); \ - __ret_604; \ +#define vmlsl_high_laneq_u32(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \ + uint64x2_t __ret_500; \ + uint64x2_t __s0_500 = __p0_500; \ + uint32x4_t __s1_500 = __p1_500; \ + uint32x4_t __s2_500 = __p2_500; \ + uint64x2_t __rev0_500; __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 1, 0); \ + uint32x4_t __rev1_500; __rev1_500 = __builtin_shufflevector(__s1_500, __s1_500, 3, 2, 1, 0); \ + uint32x4_t __rev2_500; __rev2_500 = __builtin_shufflevector(__s2_500, __s2_500, 3, 2, 1, 0); \ + __ret_500 = __rev0_500 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_500), __noswap_splat_laneq_u32(__rev2_500, __p3_500)); \ + __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \ + __ret_500; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_u16(__p0_605, __p1_605, __p2_605, __p3_605) __extension__ ({ \ - uint32x4_t __ret_605; \ - uint32x4_t __s0_605 = __p0_605; \ - uint16x8_t __s1_605 = __p1_605; \ - uint16x8_t __s2_605 = __p2_605; \ - __ret_605 = __s0_605 - vmull_u16(vget_high_u16(__s1_605), splat_laneq_u16(__s2_605, __p3_605)); \ - __ret_605; \ +#define vmlsl_high_laneq_u16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \ + uint32x4_t __ret_501; \ + uint32x4_t __s0_501 = __p0_501; \ + uint16x8_t __s1_501 = __p1_501; \ + uint16x8_t __s2_501 = __p2_501; \ + __ret_501 = __s0_501 - vmull_u16(vget_high_u16(__s1_501), splat_laneq_u16(__s2_501, __p3_501)); \ + __ret_501; \ }) #else -#define vmlsl_high_laneq_u16(__p0_606, __p1_606, __p2_606, __p3_606) __extension__ ({ \ - uint32x4_t __ret_606; \ - uint32x4_t __s0_606 = __p0_606; \ - uint16x8_t __s1_606 = __p1_606; \ - uint16x8_t __s2_606 = __p2_606; \ - uint32x4_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 3, 2, 1, 0); \ - uint16x8_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_606; __rev2_606 = __builtin_shufflevector(__s2_606, __s2_606, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_606 = __rev0_606 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_606), __noswap_splat_laneq_u16(__rev2_606, __p3_606)); \ - __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 3, 2, 1, 0); \ - __ret_606; \ +#define vmlsl_high_laneq_u16(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \ + uint32x4_t __ret_502; \ + uint32x4_t __s0_502 = __p0_502; \ + uint16x8_t __s1_502 = __p1_502; \ + uint16x8_t __s2_502 = __p2_502; \ + uint32x4_t __rev0_502; __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 3, 2, 1, 0); \ + uint16x8_t __rev1_502; __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_502; __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_502 = __rev0_502 - 
__noswap_vmull_u16(__noswap_vget_high_u16(__rev1_502), __noswap_splat_laneq_u16(__rev2_502, __p3_502)); \ + __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \ + __ret_502; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \ - int64x2_t __ret_607; \ - int64x2_t __s0_607 = __p0_607; \ - int32x4_t __s1_607 = __p1_607; \ - int32x4_t __s2_607 = __p2_607; \ - __ret_607 = __s0_607 - vmull_s32(vget_high_s32(__s1_607), splat_laneq_s32(__s2_607, __p3_607)); \ - __ret_607; \ +#define vmlsl_high_laneq_s32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \ + int64x2_t __ret_503; \ + int64x2_t __s0_503 = __p0_503; \ + int32x4_t __s1_503 = __p1_503; \ + int32x4_t __s2_503 = __p2_503; \ + __ret_503 = __s0_503 - vmull_s32(vget_high_s32(__s1_503), splat_laneq_s32(__s2_503, __p3_503)); \ + __ret_503; \ }) #else -#define vmlsl_high_laneq_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \ - int64x2_t __ret_608; \ - int64x2_t __s0_608 = __p0_608; \ - int32x4_t __s1_608 = __p1_608; \ - int32x4_t __s2_608 = __p2_608; \ - int64x2_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \ - int32x4_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \ - int32x4_t __rev2_608; __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 3, 2, 1, 0); \ - __ret_608 = __rev0_608 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_608), __noswap_splat_laneq_s32(__rev2_608, __p3_608)); \ - __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \ - __ret_608; \ +#define vmlsl_high_laneq_s32(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \ + int64x2_t __ret_504; \ + int64x2_t __s0_504 = __p0_504; \ + int32x4_t __s1_504 = __p1_504; \ + int32x4_t __s2_504 = __p2_504; \ + int64x2_t __rev0_504; __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 1, 0); \ + int32x4_t __rev1_504; __rev1_504 = __builtin_shufflevector(__s1_504, __s1_504, 3, 2, 1, 0); \ + int32x4_t __rev2_504; __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 3, 2, 1, 0); \ + __ret_504 = __rev0_504 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_504), __noswap_splat_laneq_s32(__rev2_504, __p3_504)); \ + __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \ + __ret_504; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \ - int32x4_t __ret_609; \ - int32x4_t __s0_609 = __p0_609; \ - int16x8_t __s1_609 = __p1_609; \ - int16x8_t __s2_609 = __p2_609; \ - __ret_609 = __s0_609 - vmull_s16(vget_high_s16(__s1_609), splat_laneq_s16(__s2_609, __p3_609)); \ - __ret_609; \ +#define vmlsl_high_laneq_s16(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \ + int32x4_t __ret_505; \ + int32x4_t __s0_505 = __p0_505; \ + int16x8_t __s1_505 = __p1_505; \ + int16x8_t __s2_505 = __p2_505; \ + __ret_505 = __s0_505 - vmull_s16(vget_high_s16(__s1_505), splat_laneq_s16(__s2_505, __p3_505)); \ + __ret_505; \ }) #else -#define vmlsl_high_laneq_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \ - int32x4_t __ret_610; \ - int32x4_t __s0_610 = __p0_610; \ - int16x8_t __s1_610 = __p1_610; \ - int16x8_t __s2_610 = __p2_610; \ - int32x4_t __rev0_610; __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \ - int16x8_t __rev1_610; __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_610; __rev2_610 = __builtin_shufflevector(__s2_610, 
__s2_610, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_610 = __rev0_610 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_610), __noswap_splat_laneq_s16(__rev2_610, __p3_610)); \ - __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \ - __ret_610; \ +#define vmlsl_high_laneq_s16(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \ + int32x4_t __ret_506; \ + int32x4_t __s0_506 = __p0_506; \ + int16x8_t __s1_506 = __p1_506; \ + int16x8_t __s2_506 = __p2_506; \ + int32x4_t __rev0_506; __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 3, 2, 1, 0); \ + int16x8_t __rev1_506; __rev1_506 = __builtin_shufflevector(__s1_506, __s1_506, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_506; __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_506 = __rev0_506 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_506), __noswap_splat_laneq_s16(__rev2_506, __p3_506)); \ + __ret_506 = __builtin_shufflevector(__ret_506, __ret_506, 3, 2, 1, 0); \ + __ret_506; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_u32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \ - uint64x2_t __ret_611; \ - uint64x2_t __s0_611 = __p0_611; \ - uint32x2_t __s1_611 = __p1_611; \ - uint32x4_t __s2_611 = __p2_611; \ - __ret_611 = __s0_611 - vmull_u32(__s1_611, splat_laneq_u32(__s2_611, __p3_611)); \ - __ret_611; \ +#define vmlsl_laneq_u32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \ + uint64x2_t __ret_507; \ + uint64x2_t __s0_507 = __p0_507; \ + uint32x2_t __s1_507 = __p1_507; \ + uint32x4_t __s2_507 = __p2_507; \ + __ret_507 = __s0_507 - vmull_u32(__s1_507, splat_laneq_u32(__s2_507, __p3_507)); \ + __ret_507; \ }) #else -#define vmlsl_laneq_u32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \ - uint64x2_t __ret_612; \ - uint64x2_t __s0_612 = __p0_612; \ - uint32x2_t __s1_612 = __p1_612; \ - uint32x4_t __s2_612 = __p2_612; \ - uint64x2_t __rev0_612; __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \ - uint32x2_t __rev1_612; __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 1, 0); \ - uint32x4_t __rev2_612; __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \ - __ret_612 = __rev0_612 - __noswap_vmull_u32(__rev1_612, __noswap_splat_laneq_u32(__rev2_612, __p3_612)); \ - __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \ - __ret_612; \ +#define vmlsl_laneq_u32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \ + uint64x2_t __ret_508; \ + uint64x2_t __s0_508 = __p0_508; \ + uint32x2_t __s1_508 = __p1_508; \ + uint32x4_t __s2_508 = __p2_508; \ + uint64x2_t __rev0_508; __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \ + uint32x2_t __rev1_508; __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \ + uint32x4_t __rev2_508; __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 3, 2, 1, 0); \ + __ret_508 = __rev0_508 - __noswap_vmull_u32(__rev1_508, __noswap_splat_laneq_u32(__rev2_508, __p3_508)); \ + __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \ + __ret_508; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_u16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \ - uint32x4_t __ret_613; \ - uint32x4_t __s0_613 = __p0_613; \ - uint16x4_t __s1_613 = __p1_613; \ - uint16x8_t __s2_613 = __p2_613; \ - __ret_613 = __s0_613 - vmull_u16(__s1_613, splat_laneq_u16(__s2_613, __p3_613)); \ - __ret_613; \ +#define vmlsl_laneq_u16(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \ + uint32x4_t __ret_509; \ + uint32x4_t 
__s0_509 = __p0_509; \ + uint16x4_t __s1_509 = __p1_509; \ + uint16x8_t __s2_509 = __p2_509; \ + __ret_509 = __s0_509 - vmull_u16(__s1_509, splat_laneq_u16(__s2_509, __p3_509)); \ + __ret_509; \ }) #else -#define vmlsl_laneq_u16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \ - uint32x4_t __ret_614; \ - uint32x4_t __s0_614 = __p0_614; \ - uint16x4_t __s1_614 = __p1_614; \ - uint16x8_t __s2_614 = __p2_614; \ - uint32x4_t __rev0_614; __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \ - uint16x4_t __rev1_614; __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 3, 2, 1, 0); \ - uint16x8_t __rev2_614; __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_614 = __rev0_614 - __noswap_vmull_u16(__rev1_614, __noswap_splat_laneq_u16(__rev2_614, __p3_614)); \ - __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \ - __ret_614; \ +#define vmlsl_laneq_u16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \ + uint32x4_t __ret_510; \ + uint32x4_t __s0_510 = __p0_510; \ + uint16x4_t __s1_510 = __p1_510; \ + uint16x8_t __s2_510 = __p2_510; \ + uint32x4_t __rev0_510; __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \ + uint16x4_t __rev1_510; __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \ + uint16x8_t __rev2_510; __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_510 = __rev0_510 - __noswap_vmull_u16(__rev1_510, __noswap_splat_laneq_u16(__rev2_510, __p3_510)); \ + __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \ + __ret_510; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \ - int64x2_t __ret_615; \ - int64x2_t __s0_615 = __p0_615; \ - int32x2_t __s1_615 = __p1_615; \ - int32x4_t __s2_615 = __p2_615; \ - __ret_615 = __s0_615 - vmull_s32(__s1_615, splat_laneq_s32(__s2_615, __p3_615)); \ - __ret_615; \ +#define vmlsl_laneq_s32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \ + int64x2_t __ret_511; \ + int64x2_t __s0_511 = __p0_511; \ + int32x2_t __s1_511 = __p1_511; \ + int32x4_t __s2_511 = __p2_511; \ + __ret_511 = __s0_511 - vmull_s32(__s1_511, splat_laneq_s32(__s2_511, __p3_511)); \ + __ret_511; \ }) #else -#define vmlsl_laneq_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \ - int64x2_t __ret_616; \ - int64x2_t __s0_616 = __p0_616; \ - int32x2_t __s1_616 = __p1_616; \ - int32x4_t __s2_616 = __p2_616; \ - int64x2_t __rev0_616; __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \ - int32x2_t __rev1_616; __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 1, 0); \ - int32x4_t __rev2_616; __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 3, 2, 1, 0); \ - __ret_616 = __rev0_616 - __noswap_vmull_s32(__rev1_616, __noswap_splat_laneq_s32(__rev2_616, __p3_616)); \ - __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \ - __ret_616; \ +#define vmlsl_laneq_s32(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \ + int64x2_t __ret_512; \ + int64x2_t __s0_512 = __p0_512; \ + int32x2_t __s1_512 = __p1_512; \ + int32x4_t __s2_512 = __p2_512; \ + int64x2_t __rev0_512; __rev0_512 = __builtin_shufflevector(__s0_512, __s0_512, 1, 0); \ + int32x2_t __rev1_512; __rev1_512 = __builtin_shufflevector(__s1_512, __s1_512, 1, 0); \ + int32x4_t __rev2_512; __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 3, 2, 1, 0); \ + __ret_512 = __rev0_512 - __noswap_vmull_s32(__rev1_512, 
__noswap_splat_laneq_s32(__rev2_512, __p3_512)); \ + __ret_512 = __builtin_shufflevector(__ret_512, __ret_512, 1, 0); \ + __ret_512; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \ - int32x4_t __ret_617; \ - int32x4_t __s0_617 = __p0_617; \ - int16x4_t __s1_617 = __p1_617; \ - int16x8_t __s2_617 = __p2_617; \ - __ret_617 = __s0_617 - vmull_s16(__s1_617, splat_laneq_s16(__s2_617, __p3_617)); \ - __ret_617; \ +#define vmlsl_laneq_s16(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \ + int32x4_t __ret_513; \ + int32x4_t __s0_513 = __p0_513; \ + int16x4_t __s1_513 = __p1_513; \ + int16x8_t __s2_513 = __p2_513; \ + __ret_513 = __s0_513 - vmull_s16(__s1_513, splat_laneq_s16(__s2_513, __p3_513)); \ + __ret_513; \ }) #else -#define vmlsl_laneq_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \ - int32x4_t __ret_618; \ - int32x4_t __s0_618 = __p0_618; \ - int16x4_t __s1_618 = __p1_618; \ - int16x8_t __s2_618 = __p2_618; \ - int32x4_t __rev0_618; __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \ - int16x4_t __rev1_618; __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 3, 2, 1, 0); \ - int16x8_t __rev2_618; __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_618 = __rev0_618 - __noswap_vmull_s16(__rev1_618, __noswap_splat_laneq_s16(__rev2_618, __p3_618)); \ - __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \ - __ret_618; \ +#define vmlsl_laneq_s16(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \ + int32x4_t __ret_514; \ + int32x4_t __s0_514 = __p0_514; \ + int16x4_t __s1_514 = __p1_514; \ + int16x8_t __s2_514 = __p2_514; \ + int32x4_t __rev0_514; __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 3, 2, 1, 0); \ + int16x4_t __rev1_514; __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 3, 2, 1, 0); \ + int16x8_t __rev2_514; __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_514 = __rev0_514 - __noswap_vmull_s16(__rev1_514, __noswap_splat_laneq_s16(__rev2_514, __p3_514)); \ + __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 3, 2, 1, 0); \ + __ret_514; \ }) #endif @@ -56471,146 +50564,146 @@ __ai float64x1_t vmov_n_f64(float64_t __p0) { return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_619) { - uint16x8_t __ret_619; - uint8x8_t __a1_619 = vget_high_u8(__p0_619); - __ret_619 = (uint16x8_t)(vshll_n_u8(__a1_619, 0)); - return __ret_619; +__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_515) { + uint16x8_t __ret_515; + uint8x8_t __a1_515 = vget_high_u8(__p0_515); + __ret_515 = (uint16x8_t)(vshll_n_u8(__a1_515, 0)); + return __ret_515; } #else -__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_620) { - uint16x8_t __ret_620; - uint8x16_t __rev0_620; __rev0_620 = __builtin_shufflevector(__p0_620, __p0_620, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __a1_620 = __noswap_vget_high_u8(__rev0_620); - __ret_620 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_620, 0)); - __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret_620; +__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_516) { + uint16x8_t __ret_516; + uint8x16_t __rev0_516; __rev0_516 = __builtin_shufflevector(__p0_516, __p0_516, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __a1_516 = __noswap_vget_high_u8(__rev0_516); + __ret_516 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_516, 0)); + __ret_516 = 
__builtin_shufflevector(__ret_516, __ret_516, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_516; } -__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_621) { - uint16x8_t __ret_621; - uint8x8_t __a1_621 = __noswap_vget_high_u8(__p0_621); - __ret_621 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_621, 0)); - return __ret_621; +__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_517) { + uint16x8_t __ret_517; + uint8x8_t __a1_517 = __noswap_vget_high_u8(__p0_517); + __ret_517 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_517, 0)); + return __ret_517; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_622) { - uint64x2_t __ret_622; - uint32x2_t __a1_622 = vget_high_u32(__p0_622); - __ret_622 = (uint64x2_t)(vshll_n_u32(__a1_622, 0)); - return __ret_622; +__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_518) { + uint64x2_t __ret_518; + uint32x2_t __a1_518 = vget_high_u32(__p0_518); + __ret_518 = (uint64x2_t)(vshll_n_u32(__a1_518, 0)); + return __ret_518; } #else -__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_623) { - uint64x2_t __ret_623; - uint32x4_t __rev0_623; __rev0_623 = __builtin_shufflevector(__p0_623, __p0_623, 3, 2, 1, 0); - uint32x2_t __a1_623 = __noswap_vget_high_u32(__rev0_623); - __ret_623 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_623, 0)); - __ret_623 = __builtin_shufflevector(__ret_623, __ret_623, 1, 0); - return __ret_623; +__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_519) { + uint64x2_t __ret_519; + uint32x4_t __rev0_519; __rev0_519 = __builtin_shufflevector(__p0_519, __p0_519, 3, 2, 1, 0); + uint32x2_t __a1_519 = __noswap_vget_high_u32(__rev0_519); + __ret_519 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_519, 0)); + __ret_519 = __builtin_shufflevector(__ret_519, __ret_519, 1, 0); + return __ret_519; } -__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_624) { - uint64x2_t __ret_624; - uint32x2_t __a1_624 = __noswap_vget_high_u32(__p0_624); - __ret_624 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_624, 0)); - return __ret_624; +__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_520) { + uint64x2_t __ret_520; + uint32x2_t __a1_520 = __noswap_vget_high_u32(__p0_520); + __ret_520 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_520, 0)); + return __ret_520; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_625) { - uint32x4_t __ret_625; - uint16x4_t __a1_625 = vget_high_u16(__p0_625); - __ret_625 = (uint32x4_t)(vshll_n_u16(__a1_625, 0)); - return __ret_625; +__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_521) { + uint32x4_t __ret_521; + uint16x4_t __a1_521 = vget_high_u16(__p0_521); + __ret_521 = (uint32x4_t)(vshll_n_u16(__a1_521, 0)); + return __ret_521; } #else -__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_626) { - uint32x4_t __ret_626; - uint16x8_t __rev0_626; __rev0_626 = __builtin_shufflevector(__p0_626, __p0_626, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x4_t __a1_626 = __noswap_vget_high_u16(__rev0_626); - __ret_626 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_626, 0)); - __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0); - return __ret_626; +__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_522) { + uint32x4_t __ret_522; + uint16x8_t __rev0_522; __rev0_522 = __builtin_shufflevector(__p0_522, __p0_522, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __a1_522 = __noswap_vget_high_u16(__rev0_522); + __ret_522 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_522, 0)); + __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); + return __ret_522; } -__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_627) { - uint32x4_t 
__ret_627; - uint16x4_t __a1_627 = __noswap_vget_high_u16(__p0_627); - __ret_627 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_627, 0)); - return __ret_627; +__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_523) { + uint32x4_t __ret_523; + uint16x4_t __a1_523 = __noswap_vget_high_u16(__p0_523); + __ret_523 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_523, 0)); + return __ret_523; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovl_high_s8(int8x16_t __p0_628) { - int16x8_t __ret_628; - int8x8_t __a1_628 = vget_high_s8(__p0_628); - __ret_628 = (int16x8_t)(vshll_n_s8(__a1_628, 0)); - return __ret_628; +__ai int16x8_t vmovl_high_s8(int8x16_t __p0_524) { + int16x8_t __ret_524; + int8x8_t __a1_524 = vget_high_s8(__p0_524); + __ret_524 = (int16x8_t)(vshll_n_s8(__a1_524, 0)); + return __ret_524; } #else -__ai int16x8_t vmovl_high_s8(int8x16_t __p0_629) { - int16x8_t __ret_629; - int8x16_t __rev0_629; __rev0_629 = __builtin_shufflevector(__p0_629, __p0_629, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __a1_629 = __noswap_vget_high_s8(__rev0_629); - __ret_629 = (int16x8_t)(__noswap_vshll_n_s8(__a1_629, 0)); - __ret_629 = __builtin_shufflevector(__ret_629, __ret_629, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret_629; +__ai int16x8_t vmovl_high_s8(int8x16_t __p0_525) { + int16x8_t __ret_525; + int8x16_t __rev0_525; __rev0_525 = __builtin_shufflevector(__p0_525, __p0_525, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __a1_525 = __noswap_vget_high_s8(__rev0_525); + __ret_525 = (int16x8_t)(__noswap_vshll_n_s8(__a1_525, 0)); + __ret_525 = __builtin_shufflevector(__ret_525, __ret_525, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_525; } -__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_630) { - int16x8_t __ret_630; - int8x8_t __a1_630 = __noswap_vget_high_s8(__p0_630); - __ret_630 = (int16x8_t)(__noswap_vshll_n_s8(__a1_630, 0)); - return __ret_630; +__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_526) { + int16x8_t __ret_526; + int8x8_t __a1_526 = __noswap_vget_high_s8(__p0_526); + __ret_526 = (int16x8_t)(__noswap_vshll_n_s8(__a1_526, 0)); + return __ret_526; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovl_high_s32(int32x4_t __p0_631) { - int64x2_t __ret_631; - int32x2_t __a1_631 = vget_high_s32(__p0_631); - __ret_631 = (int64x2_t)(vshll_n_s32(__a1_631, 0)); - return __ret_631; +__ai int64x2_t vmovl_high_s32(int32x4_t __p0_527) { + int64x2_t __ret_527; + int32x2_t __a1_527 = vget_high_s32(__p0_527); + __ret_527 = (int64x2_t)(vshll_n_s32(__a1_527, 0)); + return __ret_527; } #else -__ai int64x2_t vmovl_high_s32(int32x4_t __p0_632) { - int64x2_t __ret_632; - int32x4_t __rev0_632; __rev0_632 = __builtin_shufflevector(__p0_632, __p0_632, 3, 2, 1, 0); - int32x2_t __a1_632 = __noswap_vget_high_s32(__rev0_632); - __ret_632 = (int64x2_t)(__noswap_vshll_n_s32(__a1_632, 0)); - __ret_632 = __builtin_shufflevector(__ret_632, __ret_632, 1, 0); - return __ret_632; +__ai int64x2_t vmovl_high_s32(int32x4_t __p0_528) { + int64x2_t __ret_528; + int32x4_t __rev0_528; __rev0_528 = __builtin_shufflevector(__p0_528, __p0_528, 3, 2, 1, 0); + int32x2_t __a1_528 = __noswap_vget_high_s32(__rev0_528); + __ret_528 = (int64x2_t)(__noswap_vshll_n_s32(__a1_528, 0)); + __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 1, 0); + return __ret_528; } -__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_633) { - int64x2_t __ret_633; - int32x2_t __a1_633 = __noswap_vget_high_s32(__p0_633); - __ret_633 = (int64x2_t)(__noswap_vshll_n_s32(__a1_633, 0)); - return __ret_633; +__ai int64x2_t 
__noswap_vmovl_high_s32(int32x4_t __p0_529) { + int64x2_t __ret_529; + int32x2_t __a1_529 = __noswap_vget_high_s32(__p0_529); + __ret_529 = (int64x2_t)(__noswap_vshll_n_s32(__a1_529, 0)); + return __ret_529; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovl_high_s16(int16x8_t __p0_634) { - int32x4_t __ret_634; - int16x4_t __a1_634 = vget_high_s16(__p0_634); - __ret_634 = (int32x4_t)(vshll_n_s16(__a1_634, 0)); - return __ret_634; +__ai int32x4_t vmovl_high_s16(int16x8_t __p0_530) { + int32x4_t __ret_530; + int16x4_t __a1_530 = vget_high_s16(__p0_530); + __ret_530 = (int32x4_t)(vshll_n_s16(__a1_530, 0)); + return __ret_530; } #else -__ai int32x4_t vmovl_high_s16(int16x8_t __p0_635) { - int32x4_t __ret_635; - int16x8_t __rev0_635; __rev0_635 = __builtin_shufflevector(__p0_635, __p0_635, 7, 6, 5, 4, 3, 2, 1, 0); - int16x4_t __a1_635 = __noswap_vget_high_s16(__rev0_635); - __ret_635 = (int32x4_t)(__noswap_vshll_n_s16(__a1_635, 0)); - __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 3, 2, 1, 0); - return __ret_635; +__ai int32x4_t vmovl_high_s16(int16x8_t __p0_531) { + int32x4_t __ret_531; + int16x8_t __rev0_531; __rev0_531 = __builtin_shufflevector(__p0_531, __p0_531, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __a1_531 = __noswap_vget_high_s16(__rev0_531); + __ret_531 = (int32x4_t)(__noswap_vshll_n_s16(__a1_531, 0)); + __ret_531 = __builtin_shufflevector(__ret_531, __ret_531, 3, 2, 1, 0); + return __ret_531; } -__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_636) { - int32x4_t __ret_636; - int16x4_t __a1_636 = __noswap_vget_high_s16(__p0_636); - __ret_636 = (int32x4_t)(__noswap_vshll_n_s16(__a1_636, 0)); - return __ret_636; +__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_532) { + int32x4_t __ret_532; + int16x4_t __a1_532 = __noswap_vget_high_s16(__p0_532); + __ret_532 = (int32x4_t)(__noswap_vshll_n_s16(__a1_532, 0)); + return __ret_532; } #endif @@ -56738,29 +50831,29 @@ __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { __ret = __p0 * __p1; return __ret; } -#define vmuld_lane_f64(__p0_637, __p1_637, __p2_637) __extension__ ({ \ - float64_t __ret_637; \ - float64_t __s0_637 = __p0_637; \ - float64x1_t __s1_637 = __p1_637; \ - __ret_637 = __s0_637 * vget_lane_f64(__s1_637, __p2_637); \ - __ret_637; \ +#define vmuld_lane_f64(__p0_533, __p1_533, __p2_533) __extension__ ({ \ + float64_t __ret_533; \ + float64_t __s0_533 = __p0_533; \ + float64x1_t __s1_533 = __p1_533; \ + __ret_533 = __s0_533 * vget_lane_f64(__s1_533, __p2_533); \ + __ret_533; \ }) #ifdef __LITTLE_ENDIAN__ -#define vmuls_lane_f32(__p0_638, __p1_638, __p2_638) __extension__ ({ \ - float32_t __ret_638; \ - float32_t __s0_638 = __p0_638; \ - float32x2_t __s1_638 = __p1_638; \ - __ret_638 = __s0_638 * vget_lane_f32(__s1_638, __p2_638); \ - __ret_638; \ +#define vmuls_lane_f32(__p0_534, __p1_534, __p2_534) __extension__ ({ \ + float32_t __ret_534; \ + float32_t __s0_534 = __p0_534; \ + float32x2_t __s1_534 = __p1_534; \ + __ret_534 = __s0_534 * vget_lane_f32(__s1_534, __p2_534); \ + __ret_534; \ }) #else -#define vmuls_lane_f32(__p0_639, __p1_639, __p2_639) __extension__ ({ \ - float32_t __ret_639; \ - float32_t __s0_639 = __p0_639; \ - float32x2_t __s1_639 = __p1_639; \ - float32x2_t __rev1_639; __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 1, 0); \ - __ret_639 = __s0_639 * __noswap_vget_lane_f32(__rev1_639, __p2_639); \ - __ret_639; \ +#define vmuls_lane_f32(__p0_535, __p1_535, __p2_535) __extension__ ({ \ + float32_t __ret_535; \ + float32_t __s0_535 = __p0_535; \ + float32x2_t __s1_535 = 
__p1_535; \ + float32x2_t __rev1_535; __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 1, 0); \ + __ret_535 = __s0_535 * __noswap_vget_lane_f32(__rev1_535, __p2_535); \ + __ret_535; \ }) #endif @@ -56772,60 +50865,60 @@ __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f64(__p0_640, __p1_640, __p2_640) __extension__ ({ \ - float64x2_t __ret_640; \ - float64x2_t __s0_640 = __p0_640; \ - float64x1_t __s1_640 = __p1_640; \ - __ret_640 = __s0_640 * splatq_lane_f64(__s1_640, __p2_640); \ - __ret_640; \ +#define vmulq_lane_f64(__p0_536, __p1_536, __p2_536) __extension__ ({ \ + float64x2_t __ret_536; \ + float64x2_t __s0_536 = __p0_536; \ + float64x1_t __s1_536 = __p1_536; \ + __ret_536 = __s0_536 * splatq_lane_f64(__s1_536, __p2_536); \ + __ret_536; \ }) #else -#define vmulq_lane_f64(__p0_641, __p1_641, __p2_641) __extension__ ({ \ - float64x2_t __ret_641; \ - float64x2_t __s0_641 = __p0_641; \ - float64x1_t __s1_641 = __p1_641; \ - float64x2_t __rev0_641; __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 1, 0); \ - __ret_641 = __rev0_641 * __noswap_splatq_lane_f64(__s1_641, __p2_641); \ - __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 1, 0); \ - __ret_641; \ +#define vmulq_lane_f64(__p0_537, __p1_537, __p2_537) __extension__ ({ \ + float64x2_t __ret_537; \ + float64x2_t __s0_537 = __p0_537; \ + float64x1_t __s1_537 = __p1_537; \ + float64x2_t __rev0_537; __rev0_537 = __builtin_shufflevector(__s0_537, __s0_537, 1, 0); \ + __ret_537 = __rev0_537 * __noswap_splatq_lane_f64(__s1_537, __p2_537); \ + __ret_537 = __builtin_shufflevector(__ret_537, __ret_537, 1, 0); \ + __ret_537; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmuld_laneq_f64(__p0_642, __p1_642, __p2_642) __extension__ ({ \ - float64_t __ret_642; \ - float64_t __s0_642 = __p0_642; \ - float64x2_t __s1_642 = __p1_642; \ - __ret_642 = __s0_642 * vgetq_lane_f64(__s1_642, __p2_642); \ - __ret_642; \ +#define vmuld_laneq_f64(__p0_538, __p1_538, __p2_538) __extension__ ({ \ + float64_t __ret_538; \ + float64_t __s0_538 = __p0_538; \ + float64x2_t __s1_538 = __p1_538; \ + __ret_538 = __s0_538 * vgetq_lane_f64(__s1_538, __p2_538); \ + __ret_538; \ }) #else -#define vmuld_laneq_f64(__p0_643, __p1_643, __p2_643) __extension__ ({ \ - float64_t __ret_643; \ - float64_t __s0_643 = __p0_643; \ - float64x2_t __s1_643 = __p1_643; \ - float64x2_t __rev1_643; __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 1, 0); \ - __ret_643 = __s0_643 * __noswap_vgetq_lane_f64(__rev1_643, __p2_643); \ - __ret_643; \ +#define vmuld_laneq_f64(__p0_539, __p1_539, __p2_539) __extension__ ({ \ + float64_t __ret_539; \ + float64_t __s0_539 = __p0_539; \ + float64x2_t __s1_539 = __p1_539; \ + float64x2_t __rev1_539; __rev1_539 = __builtin_shufflevector(__s1_539, __s1_539, 1, 0); \ + __ret_539 = __s0_539 * __noswap_vgetq_lane_f64(__rev1_539, __p2_539); \ + __ret_539; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmuls_laneq_f32(__p0_644, __p1_644, __p2_644) __extension__ ({ \ - float32_t __ret_644; \ - float32_t __s0_644 = __p0_644; \ - float32x4_t __s1_644 = __p1_644; \ - __ret_644 = __s0_644 * vgetq_lane_f32(__s1_644, __p2_644); \ - __ret_644; \ +#define vmuls_laneq_f32(__p0_540, __p1_540, __p2_540) __extension__ ({ \ + float32_t __ret_540; \ + float32_t __s0_540 = __p0_540; \ + float32x4_t __s1_540 = __p1_540; \ + __ret_540 = __s0_540 * vgetq_lane_f32(__s1_540, __p2_540); \ + __ret_540; \ }) #else -#define vmuls_laneq_f32(__p0_645, __p1_645, __p2_645) __extension__ ({ 
\ - float32_t __ret_645; \ - float32_t __s0_645 = __p0_645; \ - float32x4_t __s1_645 = __p1_645; \ - float32x4_t __rev1_645; __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 3, 2, 1, 0); \ - __ret_645 = __s0_645 * __noswap_vgetq_lane_f32(__rev1_645, __p2_645); \ - __ret_645; \ +#define vmuls_laneq_f32(__p0_541, __p1_541, __p2_541) __extension__ ({ \ + float32_t __ret_541; \ + float32_t __s0_541 = __p0_541; \ + float32x4_t __s1_541 = __p1_541; \ + float32x4_t __rev1_541; __rev1_541 = __builtin_shufflevector(__s1_541, __s1_541, 3, 2, 1, 0); \ + __ret_541 = __s0_541 * __noswap_vgetq_lane_f32(__rev1_541, __p2_541); \ + __ret_541; \ }) #endif @@ -56849,233 +50942,233 @@ __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_u32(__p0_646, __p1_646, __p2_646) __extension__ ({ \ - uint32x4_t __ret_646; \ - uint32x4_t __s0_646 = __p0_646; \ - uint32x4_t __s1_646 = __p1_646; \ - __ret_646 = __s0_646 * splatq_laneq_u32(__s1_646, __p2_646); \ - __ret_646; \ +#define vmulq_laneq_u32(__p0_542, __p1_542, __p2_542) __extension__ ({ \ + uint32x4_t __ret_542; \ + uint32x4_t __s0_542 = __p0_542; \ + uint32x4_t __s1_542 = __p1_542; \ + __ret_542 = __s0_542 * splatq_laneq_u32(__s1_542, __p2_542); \ + __ret_542; \ }) #else -#define vmulq_laneq_u32(__p0_647, __p1_647, __p2_647) __extension__ ({ \ - uint32x4_t __ret_647; \ - uint32x4_t __s0_647 = __p0_647; \ - uint32x4_t __s1_647 = __p1_647; \ - uint32x4_t __rev0_647; __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 3, 2, 1, 0); \ - uint32x4_t __rev1_647; __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 3, 2, 1, 0); \ - __ret_647 = __rev0_647 * __noswap_splatq_laneq_u32(__rev1_647, __p2_647); \ - __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 3, 2, 1, 0); \ - __ret_647; \ +#define vmulq_laneq_u32(__p0_543, __p1_543, __p2_543) __extension__ ({ \ + uint32x4_t __ret_543; \ + uint32x4_t __s0_543 = __p0_543; \ + uint32x4_t __s1_543 = __p1_543; \ + uint32x4_t __rev0_543; __rev0_543 = __builtin_shufflevector(__s0_543, __s0_543, 3, 2, 1, 0); \ + uint32x4_t __rev1_543; __rev1_543 = __builtin_shufflevector(__s1_543, __s1_543, 3, 2, 1, 0); \ + __ret_543 = __rev0_543 * __noswap_splatq_laneq_u32(__rev1_543, __p2_543); \ + __ret_543 = __builtin_shufflevector(__ret_543, __ret_543, 3, 2, 1, 0); \ + __ret_543; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_u16(__p0_648, __p1_648, __p2_648) __extension__ ({ \ - uint16x8_t __ret_648; \ - uint16x8_t __s0_648 = __p0_648; \ - uint16x8_t __s1_648 = __p1_648; \ - __ret_648 = __s0_648 * splatq_laneq_u16(__s1_648, __p2_648); \ - __ret_648; \ +#define vmulq_laneq_u16(__p0_544, __p1_544, __p2_544) __extension__ ({ \ + uint16x8_t __ret_544; \ + uint16x8_t __s0_544 = __p0_544; \ + uint16x8_t __s1_544 = __p1_544; \ + __ret_544 = __s0_544 * splatq_laneq_u16(__s1_544, __p2_544); \ + __ret_544; \ }) #else -#define vmulq_laneq_u16(__p0_649, __p1_649, __p2_649) __extension__ ({ \ - uint16x8_t __ret_649; \ - uint16x8_t __s0_649 = __p0_649; \ - uint16x8_t __s1_649 = __p1_649; \ - uint16x8_t __rev0_649; __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_649; __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_649 = __rev0_649 * __noswap_splatq_laneq_u16(__rev1_649, __p2_649); \ - __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_649; \ +#define vmulq_laneq_u16(__p0_545, __p1_545, __p2_545) __extension__ ({ \ + 
uint16x8_t __ret_545; \ + uint16x8_t __s0_545 = __p0_545; \ + uint16x8_t __s1_545 = __p1_545; \ + uint16x8_t __rev0_545; __rev0_545 = __builtin_shufflevector(__s0_545, __s0_545, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_545; __rev1_545 = __builtin_shufflevector(__s1_545, __s1_545, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_545 = __rev0_545 * __noswap_splatq_laneq_u16(__rev1_545, __p2_545); \ + __ret_545 = __builtin_shufflevector(__ret_545, __ret_545, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_545; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f64(__p0_650, __p1_650, __p2_650) __extension__ ({ \ - float64x2_t __ret_650; \ - float64x2_t __s0_650 = __p0_650; \ - float64x2_t __s1_650 = __p1_650; \ - __ret_650 = __s0_650 * splatq_laneq_f64(__s1_650, __p2_650); \ - __ret_650; \ +#define vmulq_laneq_f64(__p0_546, __p1_546, __p2_546) __extension__ ({ \ + float64x2_t __ret_546; \ + float64x2_t __s0_546 = __p0_546; \ + float64x2_t __s1_546 = __p1_546; \ + __ret_546 = __s0_546 * splatq_laneq_f64(__s1_546, __p2_546); \ + __ret_546; \ }) #else -#define vmulq_laneq_f64(__p0_651, __p1_651, __p2_651) __extension__ ({ \ - float64x2_t __ret_651; \ - float64x2_t __s0_651 = __p0_651; \ - float64x2_t __s1_651 = __p1_651; \ - float64x2_t __rev0_651; __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 1, 0); \ - float64x2_t __rev1_651; __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 1, 0); \ - __ret_651 = __rev0_651 * __noswap_splatq_laneq_f64(__rev1_651, __p2_651); \ - __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 1, 0); \ - __ret_651; \ +#define vmulq_laneq_f64(__p0_547, __p1_547, __p2_547) __extension__ ({ \ + float64x2_t __ret_547; \ + float64x2_t __s0_547 = __p0_547; \ + float64x2_t __s1_547 = __p1_547; \ + float64x2_t __rev0_547; __rev0_547 = __builtin_shufflevector(__s0_547, __s0_547, 1, 0); \ + float64x2_t __rev1_547; __rev1_547 = __builtin_shufflevector(__s1_547, __s1_547, 1, 0); \ + __ret_547 = __rev0_547 * __noswap_splatq_laneq_f64(__rev1_547, __p2_547); \ + __ret_547 = __builtin_shufflevector(__ret_547, __ret_547, 1, 0); \ + __ret_547; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f32(__p0_652, __p1_652, __p2_652) __extension__ ({ \ - float32x4_t __ret_652; \ - float32x4_t __s0_652 = __p0_652; \ - float32x4_t __s1_652 = __p1_652; \ - __ret_652 = __s0_652 * splatq_laneq_f32(__s1_652, __p2_652); \ - __ret_652; \ +#define vmulq_laneq_f32(__p0_548, __p1_548, __p2_548) __extension__ ({ \ + float32x4_t __ret_548; \ + float32x4_t __s0_548 = __p0_548; \ + float32x4_t __s1_548 = __p1_548; \ + __ret_548 = __s0_548 * splatq_laneq_f32(__s1_548, __p2_548); \ + __ret_548; \ }) #else -#define vmulq_laneq_f32(__p0_653, __p1_653, __p2_653) __extension__ ({ \ - float32x4_t __ret_653; \ - float32x4_t __s0_653 = __p0_653; \ - float32x4_t __s1_653 = __p1_653; \ - float32x4_t __rev0_653; __rev0_653 = __builtin_shufflevector(__s0_653, __s0_653, 3, 2, 1, 0); \ - float32x4_t __rev1_653; __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 3, 2, 1, 0); \ - __ret_653 = __rev0_653 * __noswap_splatq_laneq_f32(__rev1_653, __p2_653); \ - __ret_653 = __builtin_shufflevector(__ret_653, __ret_653, 3, 2, 1, 0); \ - __ret_653; \ +#define vmulq_laneq_f32(__p0_549, __p1_549, __p2_549) __extension__ ({ \ + float32x4_t __ret_549; \ + float32x4_t __s0_549 = __p0_549; \ + float32x4_t __s1_549 = __p1_549; \ + float32x4_t __rev0_549; __rev0_549 = __builtin_shufflevector(__s0_549, __s0_549, 3, 2, 1, 0); \ + float32x4_t __rev1_549; __rev1_549 = __builtin_shufflevector(__s1_549, __s1_549, 3, 2, 1, 0); \ 
+ __ret_549 = __rev0_549 * __noswap_splatq_laneq_f32(__rev1_549, __p2_549); \ + __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 3, 2, 1, 0); \ + __ret_549; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_s32(__p0_654, __p1_654, __p2_654) __extension__ ({ \ - int32x4_t __ret_654; \ - int32x4_t __s0_654 = __p0_654; \ - int32x4_t __s1_654 = __p1_654; \ - __ret_654 = __s0_654 * splatq_laneq_s32(__s1_654, __p2_654); \ - __ret_654; \ +#define vmulq_laneq_s32(__p0_550, __p1_550, __p2_550) __extension__ ({ \ + int32x4_t __ret_550; \ + int32x4_t __s0_550 = __p0_550; \ + int32x4_t __s1_550 = __p1_550; \ + __ret_550 = __s0_550 * splatq_laneq_s32(__s1_550, __p2_550); \ + __ret_550; \ }) #else -#define vmulq_laneq_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \ - int32x4_t __ret_655; \ - int32x4_t __s0_655 = __p0_655; \ - int32x4_t __s1_655 = __p1_655; \ - int32x4_t __rev0_655; __rev0_655 = __builtin_shufflevector(__s0_655, __s0_655, 3, 2, 1, 0); \ - int32x4_t __rev1_655; __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \ - __ret_655 = __rev0_655 * __noswap_splatq_laneq_s32(__rev1_655, __p2_655); \ - __ret_655 = __builtin_shufflevector(__ret_655, __ret_655, 3, 2, 1, 0); \ - __ret_655; \ +#define vmulq_laneq_s32(__p0_551, __p1_551, __p2_551) __extension__ ({ \ + int32x4_t __ret_551; \ + int32x4_t __s0_551 = __p0_551; \ + int32x4_t __s1_551 = __p1_551; \ + int32x4_t __rev0_551; __rev0_551 = __builtin_shufflevector(__s0_551, __s0_551, 3, 2, 1, 0); \ + int32x4_t __rev1_551; __rev1_551 = __builtin_shufflevector(__s1_551, __s1_551, 3, 2, 1, 0); \ + __ret_551 = __rev0_551 * __noswap_splatq_laneq_s32(__rev1_551, __p2_551); \ + __ret_551 = __builtin_shufflevector(__ret_551, __ret_551, 3, 2, 1, 0); \ + __ret_551; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_s16(__p0_656, __p1_656, __p2_656) __extension__ ({ \ - int16x8_t __ret_656; \ - int16x8_t __s0_656 = __p0_656; \ - int16x8_t __s1_656 = __p1_656; \ - __ret_656 = __s0_656 * splatq_laneq_s16(__s1_656, __p2_656); \ - __ret_656; \ +#define vmulq_laneq_s16(__p0_552, __p1_552, __p2_552) __extension__ ({ \ + int16x8_t __ret_552; \ + int16x8_t __s0_552 = __p0_552; \ + int16x8_t __s1_552 = __p1_552; \ + __ret_552 = __s0_552 * splatq_laneq_s16(__s1_552, __p2_552); \ + __ret_552; \ }) #else -#define vmulq_laneq_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \ - int16x8_t __ret_657; \ - int16x8_t __s0_657 = __p0_657; \ - int16x8_t __s1_657 = __p1_657; \ - int16x8_t __rev0_657; __rev0_657 = __builtin_shufflevector(__s0_657, __s0_657, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_657; __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_657 = __rev0_657 * __noswap_splatq_laneq_s16(__rev1_657, __p2_657); \ - __ret_657 = __builtin_shufflevector(__ret_657, __ret_657, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_657; \ +#define vmulq_laneq_s16(__p0_553, __p1_553, __p2_553) __extension__ ({ \ + int16x8_t __ret_553; \ + int16x8_t __s0_553 = __p0_553; \ + int16x8_t __s1_553 = __p1_553; \ + int16x8_t __rev0_553; __rev0_553 = __builtin_shufflevector(__s0_553, __s0_553, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_553; __rev1_553 = __builtin_shufflevector(__s1_553, __s1_553, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_553 = __rev0_553 * __noswap_splatq_laneq_s16(__rev1_553, __p2_553); \ + __ret_553 = __builtin_shufflevector(__ret_553, __ret_553, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_553; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_u32(__p0_658, __p1_658, __p2_658) __extension__ ({ \ - 
uint32x2_t __ret_658; \ - uint32x2_t __s0_658 = __p0_658; \ - uint32x4_t __s1_658 = __p1_658; \ - __ret_658 = __s0_658 * splat_laneq_u32(__s1_658, __p2_658); \ - __ret_658; \ +#define vmul_laneq_u32(__p0_554, __p1_554, __p2_554) __extension__ ({ \ + uint32x2_t __ret_554; \ + uint32x2_t __s0_554 = __p0_554; \ + uint32x4_t __s1_554 = __p1_554; \ + __ret_554 = __s0_554 * splat_laneq_u32(__s1_554, __p2_554); \ + __ret_554; \ }) #else -#define vmul_laneq_u32(__p0_659, __p1_659, __p2_659) __extension__ ({ \ - uint32x2_t __ret_659; \ - uint32x2_t __s0_659 = __p0_659; \ - uint32x4_t __s1_659 = __p1_659; \ - uint32x2_t __rev0_659; __rev0_659 = __builtin_shufflevector(__s0_659, __s0_659, 1, 0); \ - uint32x4_t __rev1_659; __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 3, 2, 1, 0); \ - __ret_659 = __rev0_659 * __noswap_splat_laneq_u32(__rev1_659, __p2_659); \ - __ret_659 = __builtin_shufflevector(__ret_659, __ret_659, 1, 0); \ - __ret_659; \ +#define vmul_laneq_u32(__p0_555, __p1_555, __p2_555) __extension__ ({ \ + uint32x2_t __ret_555; \ + uint32x2_t __s0_555 = __p0_555; \ + uint32x4_t __s1_555 = __p1_555; \ + uint32x2_t __rev0_555; __rev0_555 = __builtin_shufflevector(__s0_555, __s0_555, 1, 0); \ + uint32x4_t __rev1_555; __rev1_555 = __builtin_shufflevector(__s1_555, __s1_555, 3, 2, 1, 0); \ + __ret_555 = __rev0_555 * __noswap_splat_laneq_u32(__rev1_555, __p2_555); \ + __ret_555 = __builtin_shufflevector(__ret_555, __ret_555, 1, 0); \ + __ret_555; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_u16(__p0_660, __p1_660, __p2_660) __extension__ ({ \ - uint16x4_t __ret_660; \ - uint16x4_t __s0_660 = __p0_660; \ - uint16x8_t __s1_660 = __p1_660; \ - __ret_660 = __s0_660 * splat_laneq_u16(__s1_660, __p2_660); \ - __ret_660; \ +#define vmul_laneq_u16(__p0_556, __p1_556, __p2_556) __extension__ ({ \ + uint16x4_t __ret_556; \ + uint16x4_t __s0_556 = __p0_556; \ + uint16x8_t __s1_556 = __p1_556; \ + __ret_556 = __s0_556 * splat_laneq_u16(__s1_556, __p2_556); \ + __ret_556; \ }) #else -#define vmul_laneq_u16(__p0_661, __p1_661, __p2_661) __extension__ ({ \ - uint16x4_t __ret_661; \ - uint16x4_t __s0_661 = __p0_661; \ - uint16x8_t __s1_661 = __p1_661; \ - uint16x4_t __rev0_661; __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \ - uint16x8_t __rev1_661; __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_661 = __rev0_661 * __noswap_splat_laneq_u16(__rev1_661, __p2_661); \ - __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 3, 2, 1, 0); \ - __ret_661; \ +#define vmul_laneq_u16(__p0_557, __p1_557, __p2_557) __extension__ ({ \ + uint16x4_t __ret_557; \ + uint16x4_t __s0_557 = __p0_557; \ + uint16x8_t __s1_557 = __p1_557; \ + uint16x4_t __rev0_557; __rev0_557 = __builtin_shufflevector(__s0_557, __s0_557, 3, 2, 1, 0); \ + uint16x8_t __rev1_557; __rev1_557 = __builtin_shufflevector(__s1_557, __s1_557, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_557 = __rev0_557 * __noswap_splat_laneq_u16(__rev1_557, __p2_557); \ + __ret_557 = __builtin_shufflevector(__ret_557, __ret_557, 3, 2, 1, 0); \ + __ret_557; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f32(__p0_662, __p1_662, __p2_662) __extension__ ({ \ - float32x2_t __ret_662; \ - float32x2_t __s0_662 = __p0_662; \ - float32x4_t __s1_662 = __p1_662; \ - __ret_662 = __s0_662 * splat_laneq_f32(__s1_662, __p2_662); \ - __ret_662; \ +#define vmul_laneq_f32(__p0_558, __p1_558, __p2_558) __extension__ ({ \ + float32x2_t __ret_558; \ + float32x2_t __s0_558 = __p0_558; \ + float32x4_t __s1_558 
= __p1_558; \ + __ret_558 = __s0_558 * splat_laneq_f32(__s1_558, __p2_558); \ + __ret_558; \ }) #else -#define vmul_laneq_f32(__p0_663, __p1_663, __p2_663) __extension__ ({ \ - float32x2_t __ret_663; \ - float32x2_t __s0_663 = __p0_663; \ - float32x4_t __s1_663 = __p1_663; \ - float32x2_t __rev0_663; __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 1, 0); \ - float32x4_t __rev1_663; __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 3, 2, 1, 0); \ - __ret_663 = __rev0_663 * __noswap_splat_laneq_f32(__rev1_663, __p2_663); \ - __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 1, 0); \ - __ret_663; \ +#define vmul_laneq_f32(__p0_559, __p1_559, __p2_559) __extension__ ({ \ + float32x2_t __ret_559; \ + float32x2_t __s0_559 = __p0_559; \ + float32x4_t __s1_559 = __p1_559; \ + float32x2_t __rev0_559; __rev0_559 = __builtin_shufflevector(__s0_559, __s0_559, 1, 0); \ + float32x4_t __rev1_559; __rev1_559 = __builtin_shufflevector(__s1_559, __s1_559, 3, 2, 1, 0); \ + __ret_559 = __rev0_559 * __noswap_splat_laneq_f32(__rev1_559, __p2_559); \ + __ret_559 = __builtin_shufflevector(__ret_559, __ret_559, 1, 0); \ + __ret_559; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ - int32x2_t __ret_664; \ - int32x2_t __s0_664 = __p0_664; \ - int32x4_t __s1_664 = __p1_664; \ - __ret_664 = __s0_664 * splat_laneq_s32(__s1_664, __p2_664); \ - __ret_664; \ +#define vmul_laneq_s32(__p0_560, __p1_560, __p2_560) __extension__ ({ \ + int32x2_t __ret_560; \ + int32x2_t __s0_560 = __p0_560; \ + int32x4_t __s1_560 = __p1_560; \ + __ret_560 = __s0_560 * splat_laneq_s32(__s1_560, __p2_560); \ + __ret_560; \ }) #else -#define vmul_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \ - int32x2_t __ret_665; \ - int32x2_t __s0_665 = __p0_665; \ - int32x4_t __s1_665 = __p1_665; \ - int32x2_t __rev0_665; __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 1, 0); \ - int32x4_t __rev1_665; __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \ - __ret_665 = __rev0_665 * __noswap_splat_laneq_s32(__rev1_665, __p2_665); \ - __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \ - __ret_665; \ +#define vmul_laneq_s32(__p0_561, __p1_561, __p2_561) __extension__ ({ \ + int32x2_t __ret_561; \ + int32x2_t __s0_561 = __p0_561; \ + int32x4_t __s1_561 = __p1_561; \ + int32x2_t __rev0_561; __rev0_561 = __builtin_shufflevector(__s0_561, __s0_561, 1, 0); \ + int32x4_t __rev1_561; __rev1_561 = __builtin_shufflevector(__s1_561, __s1_561, 3, 2, 1, 0); \ + __ret_561 = __rev0_561 * __noswap_splat_laneq_s32(__rev1_561, __p2_561); \ + __ret_561 = __builtin_shufflevector(__ret_561, __ret_561, 1, 0); \ + __ret_561; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ - int16x4_t __ret_666; \ - int16x4_t __s0_666 = __p0_666; \ - int16x8_t __s1_666 = __p1_666; \ - __ret_666 = __s0_666 * splat_laneq_s16(__s1_666, __p2_666); \ - __ret_666; \ +#define vmul_laneq_s16(__p0_562, __p1_562, __p2_562) __extension__ ({ \ + int16x4_t __ret_562; \ + int16x4_t __s0_562 = __p0_562; \ + int16x8_t __s1_562 = __p1_562; \ + __ret_562 = __s0_562 * splat_laneq_s16(__s1_562, __p2_562); \ + __ret_562; \ }) #else -#define vmul_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \ - int16x4_t __ret_667; \ - int16x4_t __s0_667 = __p0_667; \ - int16x8_t __s1_667 = __p1_667; \ - int16x4_t __rev0_667; __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 3, 2, 1, 0); \ - int16x8_t __rev1_667; 
__rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_667 = __rev0_667 * __noswap_splat_laneq_s16(__rev1_667, __p2_667); \ - __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \ - __ret_667; \ +#define vmul_laneq_s16(__p0_563, __p1_563, __p2_563) __extension__ ({ \ + int16x4_t __ret_563; \ + int16x4_t __s0_563 = __p0_563; \ + int16x8_t __s1_563 = __p1_563; \ + int16x4_t __rev0_563; __rev0_563 = __builtin_shufflevector(__s0_563, __s0_563, 3, 2, 1, 0); \ + int16x8_t __rev1_563; __rev1_563 = __builtin_shufflevector(__s1_563, __s1_563, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_563 = __rev0_563 * __noswap_splat_laneq_s16(__rev1_563, __p2_563); \ + __ret_563 = __builtin_shufflevector(__ret_563, __ret_563, 3, 2, 1, 0); \ + __ret_563; \ }) #endif @@ -57100,11 +51193,6 @@ __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { } #endif -__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { - poly128_t __ret; - __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); - return __ret; -} #ifdef __LITTLE_ENDIAN__ __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { poly16x8_t __ret; @@ -57225,186 +51313,170 @@ __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly128_t __ret; - __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); - return __ret; -} -#else -__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly128_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \ - uint64x2_t __ret_668; \ - uint32x4_t __s0_668 = __p0_668; \ - uint32x2_t __s1_668 = __p1_668; \ - __ret_668 = vmull_u32(vget_high_u32(__s0_668), splat_lane_u32(__s1_668, __p2_668)); \ - __ret_668; \ +#define vmull_high_lane_u32(__p0_564, __p1_564, __p2_564) __extension__ ({ \ + uint64x2_t __ret_564; \ + uint32x4_t __s0_564 = __p0_564; \ + uint32x2_t __s1_564 = __p1_564; \ + __ret_564 = vmull_u32(vget_high_u32(__s0_564), splat_lane_u32(__s1_564, __p2_564)); \ + __ret_564; \ }) #else -#define vmull_high_lane_u32(__p0_669, __p1_669, __p2_669) __extension__ ({ \ - uint64x2_t __ret_669; \ - uint32x4_t __s0_669 = __p0_669; \ - uint32x2_t __s1_669 = __p1_669; \ - uint32x4_t __rev0_669; __rev0_669 = __builtin_shufflevector(__s0_669, __s0_669, 3, 2, 1, 0); \ - uint32x2_t __rev1_669; __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 1, 0); \ - __ret_669 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_669), __noswap_splat_lane_u32(__rev1_669, __p2_669)); \ - __ret_669 = __builtin_shufflevector(__ret_669, __ret_669, 1, 0); \ - __ret_669; \ +#define vmull_high_lane_u32(__p0_565, __p1_565, __p2_565) __extension__ ({ \ + uint64x2_t __ret_565; \ + uint32x4_t __s0_565 = __p0_565; \ + uint32x2_t __s1_565 = __p1_565; \ + uint32x4_t __rev0_565; __rev0_565 = __builtin_shufflevector(__s0_565, __s0_565, 3, 2, 1, 0); \ + uint32x2_t __rev1_565; __rev1_565 = __builtin_shufflevector(__s1_565, __s1_565, 1, 0); \ + __ret_565 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_565), __noswap_splat_lane_u32(__rev1_565, __p2_565)); \ + __ret_565 = __builtin_shufflevector(__ret_565, 
__ret_565, 1, 0); \ + __ret_565; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_u16(__p0_670, __p1_670, __p2_670) __extension__ ({ \ - uint32x4_t __ret_670; \ - uint16x8_t __s0_670 = __p0_670; \ - uint16x4_t __s1_670 = __p1_670; \ - __ret_670 = vmull_u16(vget_high_u16(__s0_670), splat_lane_u16(__s1_670, __p2_670)); \ - __ret_670; \ +#define vmull_high_lane_u16(__p0_566, __p1_566, __p2_566) __extension__ ({ \ + uint32x4_t __ret_566; \ + uint16x8_t __s0_566 = __p0_566; \ + uint16x4_t __s1_566 = __p1_566; \ + __ret_566 = vmull_u16(vget_high_u16(__s0_566), splat_lane_u16(__s1_566, __p2_566)); \ + __ret_566; \ }) #else -#define vmull_high_lane_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ - uint32x4_t __ret_671; \ - uint16x8_t __s0_671 = __p0_671; \ - uint16x4_t __s1_671 = __p1_671; \ - uint16x8_t __rev0_671; __rev0_671 = __builtin_shufflevector(__s0_671, __s0_671, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev1_671; __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 3, 2, 1, 0); \ - __ret_671 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_671), __noswap_splat_lane_u16(__rev1_671, __p2_671)); \ - __ret_671 = __builtin_shufflevector(__ret_671, __ret_671, 3, 2, 1, 0); \ - __ret_671; \ +#define vmull_high_lane_u16(__p0_567, __p1_567, __p2_567) __extension__ ({ \ + uint32x4_t __ret_567; \ + uint16x8_t __s0_567 = __p0_567; \ + uint16x4_t __s1_567 = __p1_567; \ + uint16x8_t __rev0_567; __rev0_567 = __builtin_shufflevector(__s0_567, __s0_567, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_567; __rev1_567 = __builtin_shufflevector(__s1_567, __s1_567, 3, 2, 1, 0); \ + __ret_567 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_567), __noswap_splat_lane_u16(__rev1_567, __p2_567)); \ + __ret_567 = __builtin_shufflevector(__ret_567, __ret_567, 3, 2, 1, 0); \ + __ret_567; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \ - int64x2_t __ret_672; \ - int32x4_t __s0_672 = __p0_672; \ - int32x2_t __s1_672 = __p1_672; \ - __ret_672 = vmull_s32(vget_high_s32(__s0_672), splat_lane_s32(__s1_672, __p2_672)); \ - __ret_672; \ +#define vmull_high_lane_s32(__p0_568, __p1_568, __p2_568) __extension__ ({ \ + int64x2_t __ret_568; \ + int32x4_t __s0_568 = __p0_568; \ + int32x2_t __s1_568 = __p1_568; \ + __ret_568 = vmull_s32(vget_high_s32(__s0_568), splat_lane_s32(__s1_568, __p2_568)); \ + __ret_568; \ }) #else -#define vmull_high_lane_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \ - int64x2_t __ret_673; \ - int32x4_t __s0_673 = __p0_673; \ - int32x2_t __s1_673 = __p1_673; \ - int32x4_t __rev0_673; __rev0_673 = __builtin_shufflevector(__s0_673, __s0_673, 3, 2, 1, 0); \ - int32x2_t __rev1_673; __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 1, 0); \ - __ret_673 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_673), __noswap_splat_lane_s32(__rev1_673, __p2_673)); \ - __ret_673 = __builtin_shufflevector(__ret_673, __ret_673, 1, 0); \ - __ret_673; \ +#define vmull_high_lane_s32(__p0_569, __p1_569, __p2_569) __extension__ ({ \ + int64x2_t __ret_569; \ + int32x4_t __s0_569 = __p0_569; \ + int32x2_t __s1_569 = __p1_569; \ + int32x4_t __rev0_569; __rev0_569 = __builtin_shufflevector(__s0_569, __s0_569, 3, 2, 1, 0); \ + int32x2_t __rev1_569; __rev1_569 = __builtin_shufflevector(__s1_569, __s1_569, 1, 0); \ + __ret_569 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_569), __noswap_splat_lane_s32(__rev1_569, __p2_569)); \ + __ret_569 = __builtin_shufflevector(__ret_569, __ret_569, 1, 0); \ + __ret_569; \ }) #endif 
#ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \ - int32x4_t __ret_674; \ - int16x8_t __s0_674 = __p0_674; \ - int16x4_t __s1_674 = __p1_674; \ - __ret_674 = vmull_s16(vget_high_s16(__s0_674), splat_lane_s16(__s1_674, __p2_674)); \ - __ret_674; \ +#define vmull_high_lane_s16(__p0_570, __p1_570, __p2_570) __extension__ ({ \ + int32x4_t __ret_570; \ + int16x8_t __s0_570 = __p0_570; \ + int16x4_t __s1_570 = __p1_570; \ + __ret_570 = vmull_s16(vget_high_s16(__s0_570), splat_lane_s16(__s1_570, __p2_570)); \ + __ret_570; \ }) #else -#define vmull_high_lane_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \ - int32x4_t __ret_675; \ - int16x8_t __s0_675 = __p0_675; \ - int16x4_t __s1_675 = __p1_675; \ - int16x8_t __rev0_675; __rev0_675 = __builtin_shufflevector(__s0_675, __s0_675, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_675; __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 3, 2, 1, 0); \ - __ret_675 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_675), __noswap_splat_lane_s16(__rev1_675, __p2_675)); \ - __ret_675 = __builtin_shufflevector(__ret_675, __ret_675, 3, 2, 1, 0); \ - __ret_675; \ +#define vmull_high_lane_s16(__p0_571, __p1_571, __p2_571) __extension__ ({ \ + int32x4_t __ret_571; \ + int16x8_t __s0_571 = __p0_571; \ + int16x4_t __s1_571 = __p1_571; \ + int16x8_t __rev0_571; __rev0_571 = __builtin_shufflevector(__s0_571, __s0_571, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_571; __rev1_571 = __builtin_shufflevector(__s1_571, __s1_571, 3, 2, 1, 0); \ + __ret_571 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_571), __noswap_splat_lane_s16(__rev1_571, __p2_571)); \ + __ret_571 = __builtin_shufflevector(__ret_571, __ret_571, 3, 2, 1, 0); \ + __ret_571; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_u32(__p0_676, __p1_676, __p2_676) __extension__ ({ \ - uint64x2_t __ret_676; \ - uint32x4_t __s0_676 = __p0_676; \ - uint32x4_t __s1_676 = __p1_676; \ - __ret_676 = vmull_u32(vget_high_u32(__s0_676), splat_laneq_u32(__s1_676, __p2_676)); \ - __ret_676; \ +#define vmull_high_laneq_u32(__p0_572, __p1_572, __p2_572) __extension__ ({ \ + uint64x2_t __ret_572; \ + uint32x4_t __s0_572 = __p0_572; \ + uint32x4_t __s1_572 = __p1_572; \ + __ret_572 = vmull_u32(vget_high_u32(__s0_572), splat_laneq_u32(__s1_572, __p2_572)); \ + __ret_572; \ }) #else -#define vmull_high_laneq_u32(__p0_677, __p1_677, __p2_677) __extension__ ({ \ - uint64x2_t __ret_677; \ - uint32x4_t __s0_677 = __p0_677; \ - uint32x4_t __s1_677 = __p1_677; \ - uint32x4_t __rev0_677; __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, 3, 2, 1, 0); \ - uint32x4_t __rev1_677; __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, 3, 2, 1, 0); \ - __ret_677 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_677), __noswap_splat_laneq_u32(__rev1_677, __p2_677)); \ - __ret_677 = __builtin_shufflevector(__ret_677, __ret_677, 1, 0); \ - __ret_677; \ +#define vmull_high_laneq_u32(__p0_573, __p1_573, __p2_573) __extension__ ({ \ + uint64x2_t __ret_573; \ + uint32x4_t __s0_573 = __p0_573; \ + uint32x4_t __s1_573 = __p1_573; \ + uint32x4_t __rev0_573; __rev0_573 = __builtin_shufflevector(__s0_573, __s0_573, 3, 2, 1, 0); \ + uint32x4_t __rev1_573; __rev1_573 = __builtin_shufflevector(__s1_573, __s1_573, 3, 2, 1, 0); \ + __ret_573 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_573), __noswap_splat_laneq_u32(__rev1_573, __p2_573)); \ + __ret_573 = __builtin_shufflevector(__ret_573, __ret_573, 1, 0); \ + __ret_573; \ }) #endif #ifdef __LITTLE_ENDIAN__ 
-#define vmull_high_laneq_u16(__p0_678, __p1_678, __p2_678) __extension__ ({ \ - uint32x4_t __ret_678; \ - uint16x8_t __s0_678 = __p0_678; \ - uint16x8_t __s1_678 = __p1_678; \ - __ret_678 = vmull_u16(vget_high_u16(__s0_678), splat_laneq_u16(__s1_678, __p2_678)); \ - __ret_678; \ +#define vmull_high_laneq_u16(__p0_574, __p1_574, __p2_574) __extension__ ({ \ + uint32x4_t __ret_574; \ + uint16x8_t __s0_574 = __p0_574; \ + uint16x8_t __s1_574 = __p1_574; \ + __ret_574 = vmull_u16(vget_high_u16(__s0_574), splat_laneq_u16(__s1_574, __p2_574)); \ + __ret_574; \ }) #else -#define vmull_high_laneq_u16(__p0_679, __p1_679, __p2_679) __extension__ ({ \ - uint32x4_t __ret_679; \ - uint16x8_t __s0_679 = __p0_679; \ - uint16x8_t __s1_679 = __p1_679; \ - uint16x8_t __rev0_679; __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_679; __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_679 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_679), __noswap_splat_laneq_u16(__rev1_679, __p2_679)); \ - __ret_679 = __builtin_shufflevector(__ret_679, __ret_679, 3, 2, 1, 0); \ - __ret_679; \ +#define vmull_high_laneq_u16(__p0_575, __p1_575, __p2_575) __extension__ ({ \ + uint32x4_t __ret_575; \ + uint16x8_t __s0_575 = __p0_575; \ + uint16x8_t __s1_575 = __p1_575; \ + uint16x8_t __rev0_575; __rev0_575 = __builtin_shufflevector(__s0_575, __s0_575, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_575; __rev1_575 = __builtin_shufflevector(__s1_575, __s1_575, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_575 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_575), __noswap_splat_laneq_u16(__rev1_575, __p2_575)); \ + __ret_575 = __builtin_shufflevector(__ret_575, __ret_575, 3, 2, 1, 0); \ + __ret_575; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \ - int64x2_t __ret_680; \ - int32x4_t __s0_680 = __p0_680; \ - int32x4_t __s1_680 = __p1_680; \ - __ret_680 = vmull_s32(vget_high_s32(__s0_680), splat_laneq_s32(__s1_680, __p2_680)); \ - __ret_680; \ +#define vmull_high_laneq_s32(__p0_576, __p1_576, __p2_576) __extension__ ({ \ + int64x2_t __ret_576; \ + int32x4_t __s0_576 = __p0_576; \ + int32x4_t __s1_576 = __p1_576; \ + __ret_576 = vmull_s32(vget_high_s32(__s0_576), splat_laneq_s32(__s1_576, __p2_576)); \ + __ret_576; \ }) #else -#define vmull_high_laneq_s32(__p0_681, __p1_681, __p2_681) __extension__ ({ \ - int64x2_t __ret_681; \ - int32x4_t __s0_681 = __p0_681; \ - int32x4_t __s1_681 = __p1_681; \ - int32x4_t __rev0_681; __rev0_681 = __builtin_shufflevector(__s0_681, __s0_681, 3, 2, 1, 0); \ - int32x4_t __rev1_681; __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, 3, 2, 1, 0); \ - __ret_681 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_681), __noswap_splat_laneq_s32(__rev1_681, __p2_681)); \ - __ret_681 = __builtin_shufflevector(__ret_681, __ret_681, 1, 0); \ - __ret_681; \ +#define vmull_high_laneq_s32(__p0_577, __p1_577, __p2_577) __extension__ ({ \ + int64x2_t __ret_577; \ + int32x4_t __s0_577 = __p0_577; \ + int32x4_t __s1_577 = __p1_577; \ + int32x4_t __rev0_577; __rev0_577 = __builtin_shufflevector(__s0_577, __s0_577, 3, 2, 1, 0); \ + int32x4_t __rev1_577; __rev1_577 = __builtin_shufflevector(__s1_577, __s1_577, 3, 2, 1, 0); \ + __ret_577 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_577), __noswap_splat_laneq_s32(__rev1_577, __p2_577)); \ + __ret_577 = __builtin_shufflevector(__ret_577, __ret_577, 1, 0); \ + __ret_577; \ }) #endif #ifdef 
__LITTLE_ENDIAN__ -#define vmull_high_laneq_s16(__p0_682, __p1_682, __p2_682) __extension__ ({ \ - int32x4_t __ret_682; \ - int16x8_t __s0_682 = __p0_682; \ - int16x8_t __s1_682 = __p1_682; \ - __ret_682 = vmull_s16(vget_high_s16(__s0_682), splat_laneq_s16(__s1_682, __p2_682)); \ - __ret_682; \ +#define vmull_high_laneq_s16(__p0_578, __p1_578, __p2_578) __extension__ ({ \ + int32x4_t __ret_578; \ + int16x8_t __s0_578 = __p0_578; \ + int16x8_t __s1_578 = __p1_578; \ + __ret_578 = vmull_s16(vget_high_s16(__s0_578), splat_laneq_s16(__s1_578, __p2_578)); \ + __ret_578; \ }) #else -#define vmull_high_laneq_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \ - int32x4_t __ret_683; \ - int16x8_t __s0_683 = __p0_683; \ - int16x8_t __s1_683 = __p1_683; \ - int16x8_t __rev0_683; __rev0_683 = __builtin_shufflevector(__s0_683, __s0_683, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_683; __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_683 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_683), __noswap_splat_laneq_s16(__rev1_683, __p2_683)); \ - __ret_683 = __builtin_shufflevector(__ret_683, __ret_683, 3, 2, 1, 0); \ - __ret_683; \ +#define vmull_high_laneq_s16(__p0_579, __p1_579, __p2_579) __extension__ ({ \ + int32x4_t __ret_579; \ + int16x8_t __s0_579 = __p0_579; \ + int16x8_t __s1_579 = __p1_579; \ + int16x8_t __rev0_579; __rev0_579 = __builtin_shufflevector(__s0_579, __s0_579, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_579; __rev1_579 = __builtin_shufflevector(__s1_579, __s1_579, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_579 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_579), __noswap_splat_laneq_s16(__rev1_579, __p2_579)); \ + __ret_579 = __builtin_shufflevector(__ret_579, __ret_579, 3, 2, 1, 0); \ + __ret_579; \ }) #endif @@ -57473,86 +51545,86 @@ __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_u32(__p0_684, __p1_684, __p2_684) __extension__ ({ \ - uint64x2_t __ret_684; \ - uint32x2_t __s0_684 = __p0_684; \ - uint32x4_t __s1_684 = __p1_684; \ - __ret_684 = vmull_u32(__s0_684, splat_laneq_u32(__s1_684, __p2_684)); \ - __ret_684; \ +#define vmull_laneq_u32(__p0_580, __p1_580, __p2_580) __extension__ ({ \ + uint64x2_t __ret_580; \ + uint32x2_t __s0_580 = __p0_580; \ + uint32x4_t __s1_580 = __p1_580; \ + __ret_580 = vmull_u32(__s0_580, splat_laneq_u32(__s1_580, __p2_580)); \ + __ret_580; \ }) #else -#define vmull_laneq_u32(__p0_685, __p1_685, __p2_685) __extension__ ({ \ - uint64x2_t __ret_685; \ - uint32x2_t __s0_685 = __p0_685; \ - uint32x4_t __s1_685 = __p1_685; \ - uint32x2_t __rev0_685; __rev0_685 = __builtin_shufflevector(__s0_685, __s0_685, 1, 0); \ - uint32x4_t __rev1_685; __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, 3, 2, 1, 0); \ - __ret_685 = __noswap_vmull_u32(__rev0_685, __noswap_splat_laneq_u32(__rev1_685, __p2_685)); \ - __ret_685 = __builtin_shufflevector(__ret_685, __ret_685, 1, 0); \ - __ret_685; \ +#define vmull_laneq_u32(__p0_581, __p1_581, __p2_581) __extension__ ({ \ + uint64x2_t __ret_581; \ + uint32x2_t __s0_581 = __p0_581; \ + uint32x4_t __s1_581 = __p1_581; \ + uint32x2_t __rev0_581; __rev0_581 = __builtin_shufflevector(__s0_581, __s0_581, 1, 0); \ + uint32x4_t __rev1_581; __rev1_581 = __builtin_shufflevector(__s1_581, __s1_581, 3, 2, 1, 0); \ + __ret_581 = __noswap_vmull_u32(__rev0_581, __noswap_splat_laneq_u32(__rev1_581, __p2_581)); \ + __ret_581 = __builtin_shufflevector(__ret_581, __ret_581, 1, 0); \ + __ret_581; \ }) #endif #ifdef 
__LITTLE_ENDIAN__ -#define vmull_laneq_u16(__p0_686, __p1_686, __p2_686) __extension__ ({ \ - uint32x4_t __ret_686; \ - uint16x4_t __s0_686 = __p0_686; \ - uint16x8_t __s1_686 = __p1_686; \ - __ret_686 = vmull_u16(__s0_686, splat_laneq_u16(__s1_686, __p2_686)); \ - __ret_686; \ +#define vmull_laneq_u16(__p0_582, __p1_582, __p2_582) __extension__ ({ \ + uint32x4_t __ret_582; \ + uint16x4_t __s0_582 = __p0_582; \ + uint16x8_t __s1_582 = __p1_582; \ + __ret_582 = vmull_u16(__s0_582, splat_laneq_u16(__s1_582, __p2_582)); \ + __ret_582; \ }) #else -#define vmull_laneq_u16(__p0_687, __p1_687, __p2_687) __extension__ ({ \ - uint32x4_t __ret_687; \ - uint16x4_t __s0_687 = __p0_687; \ - uint16x8_t __s1_687 = __p1_687; \ - uint16x4_t __rev0_687; __rev0_687 = __builtin_shufflevector(__s0_687, __s0_687, 3, 2, 1, 0); \ - uint16x8_t __rev1_687; __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_687 = __noswap_vmull_u16(__rev0_687, __noswap_splat_laneq_u16(__rev1_687, __p2_687)); \ - __ret_687 = __builtin_shufflevector(__ret_687, __ret_687, 3, 2, 1, 0); \ - __ret_687; \ +#define vmull_laneq_u16(__p0_583, __p1_583, __p2_583) __extension__ ({ \ + uint32x4_t __ret_583; \ + uint16x4_t __s0_583 = __p0_583; \ + uint16x8_t __s1_583 = __p1_583; \ + uint16x4_t __rev0_583; __rev0_583 = __builtin_shufflevector(__s0_583, __s0_583, 3, 2, 1, 0); \ + uint16x8_t __rev1_583; __rev1_583 = __builtin_shufflevector(__s1_583, __s1_583, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_583 = __noswap_vmull_u16(__rev0_583, __noswap_splat_laneq_u16(__rev1_583, __p2_583)); \ + __ret_583 = __builtin_shufflevector(__ret_583, __ret_583, 3, 2, 1, 0); \ + __ret_583; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_s32(__p0_688, __p1_688, __p2_688) __extension__ ({ \ - int64x2_t __ret_688; \ - int32x2_t __s0_688 = __p0_688; \ - int32x4_t __s1_688 = __p1_688; \ - __ret_688 = vmull_s32(__s0_688, splat_laneq_s32(__s1_688, __p2_688)); \ - __ret_688; \ +#define vmull_laneq_s32(__p0_584, __p1_584, __p2_584) __extension__ ({ \ + int64x2_t __ret_584; \ + int32x2_t __s0_584 = __p0_584; \ + int32x4_t __s1_584 = __p1_584; \ + __ret_584 = vmull_s32(__s0_584, splat_laneq_s32(__s1_584, __p2_584)); \ + __ret_584; \ }) #else -#define vmull_laneq_s32(__p0_689, __p1_689, __p2_689) __extension__ ({ \ - int64x2_t __ret_689; \ - int32x2_t __s0_689 = __p0_689; \ - int32x4_t __s1_689 = __p1_689; \ - int32x2_t __rev0_689; __rev0_689 = __builtin_shufflevector(__s0_689, __s0_689, 1, 0); \ - int32x4_t __rev1_689; __rev1_689 = __builtin_shufflevector(__s1_689, __s1_689, 3, 2, 1, 0); \ - __ret_689 = __noswap_vmull_s32(__rev0_689, __noswap_splat_laneq_s32(__rev1_689, __p2_689)); \ - __ret_689 = __builtin_shufflevector(__ret_689, __ret_689, 1, 0); \ - __ret_689; \ +#define vmull_laneq_s32(__p0_585, __p1_585, __p2_585) __extension__ ({ \ + int64x2_t __ret_585; \ + int32x2_t __s0_585 = __p0_585; \ + int32x4_t __s1_585 = __p1_585; \ + int32x2_t __rev0_585; __rev0_585 = __builtin_shufflevector(__s0_585, __s0_585, 1, 0); \ + int32x4_t __rev1_585; __rev1_585 = __builtin_shufflevector(__s1_585, __s1_585, 3, 2, 1, 0); \ + __ret_585 = __noswap_vmull_s32(__rev0_585, __noswap_splat_laneq_s32(__rev1_585, __p2_585)); \ + __ret_585 = __builtin_shufflevector(__ret_585, __ret_585, 1, 0); \ + __ret_585; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_s16(__p0_690, __p1_690, __p2_690) __extension__ ({ \ - int32x4_t __ret_690; \ - int16x4_t __s0_690 = __p0_690; \ - int16x8_t __s1_690 = __p1_690; \ - __ret_690 = vmull_s16(__s0_690, 
splat_laneq_s16(__s1_690, __p2_690)); \ - __ret_690; \ +#define vmull_laneq_s16(__p0_586, __p1_586, __p2_586) __extension__ ({ \ + int32x4_t __ret_586; \ + int16x4_t __s0_586 = __p0_586; \ + int16x8_t __s1_586 = __p1_586; \ + __ret_586 = vmull_s16(__s0_586, splat_laneq_s16(__s1_586, __p2_586)); \ + __ret_586; \ }) #else -#define vmull_laneq_s16(__p0_691, __p1_691, __p2_691) __extension__ ({ \ - int32x4_t __ret_691; \ - int16x4_t __s0_691 = __p0_691; \ - int16x8_t __s1_691 = __p1_691; \ - int16x4_t __rev0_691; __rev0_691 = __builtin_shufflevector(__s0_691, __s0_691, 3, 2, 1, 0); \ - int16x8_t __rev1_691; __rev1_691 = __builtin_shufflevector(__s1_691, __s1_691, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_691 = __noswap_vmull_s16(__rev0_691, __noswap_splat_laneq_s16(__rev1_691, __p2_691)); \ - __ret_691 = __builtin_shufflevector(__ret_691, __ret_691, 3, 2, 1, 0); \ - __ret_691; \ +#define vmull_laneq_s16(__p0_587, __p1_587, __p2_587) __extension__ ({ \ + int32x4_t __ret_587; \ + int16x4_t __s0_587 = __p0_587; \ + int16x8_t __s1_587 = __p1_587; \ + int16x4_t __rev0_587; __rev0_587 = __builtin_shufflevector(__s0_587, __s0_587, 3, 2, 1, 0); \ + int16x8_t __rev1_587; __rev1_587 = __builtin_shufflevector(__s1_587, __s1_587, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_587 = __noswap_vmull_s16(__rev0_587, __noswap_splat_laneq_s16(__rev1_587, __p2_587)); \ + __ret_587 = __builtin_shufflevector(__ret_587, __ret_587, 3, 2, 1, 0); \ + __ret_587; \ }) #endif @@ -57637,192 +51709,192 @@ __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); return __ret; } -#define vmulxd_lane_f64(__p0_692, __p1_692, __p2_692) __extension__ ({ \ - float64_t __ret_692; \ - float64_t __s0_692 = __p0_692; \ - float64x1_t __s1_692 = __p1_692; \ - __ret_692 = vmulxd_f64(__s0_692, vget_lane_f64(__s1_692, __p2_692)); \ - __ret_692; \ +#define vmulxd_lane_f64(__p0_588, __p1_588, __p2_588) __extension__ ({ \ + float64_t __ret_588; \ + float64_t __s0_588 = __p0_588; \ + float64x1_t __s1_588 = __p1_588; \ + __ret_588 = vmulxd_f64(__s0_588, vget_lane_f64(__s1_588, __p2_588)); \ + __ret_588; \ }) #ifdef __LITTLE_ENDIAN__ -#define vmulxs_lane_f32(__p0_693, __p1_693, __p2_693) __extension__ ({ \ - float32_t __ret_693; \ - float32_t __s0_693 = __p0_693; \ - float32x2_t __s1_693 = __p1_693; \ - __ret_693 = vmulxs_f32(__s0_693, vget_lane_f32(__s1_693, __p2_693)); \ - __ret_693; \ +#define vmulxs_lane_f32(__p0_589, __p1_589, __p2_589) __extension__ ({ \ + float32_t __ret_589; \ + float32_t __s0_589 = __p0_589; \ + float32x2_t __s1_589 = __p1_589; \ + __ret_589 = vmulxs_f32(__s0_589, vget_lane_f32(__s1_589, __p2_589)); \ + __ret_589; \ }) #else -#define vmulxs_lane_f32(__p0_694, __p1_694, __p2_694) __extension__ ({ \ - float32_t __ret_694; \ - float32_t __s0_694 = __p0_694; \ - float32x2_t __s1_694 = __p1_694; \ - float32x2_t __rev1_694; __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \ - __ret_694 = vmulxs_f32(__s0_694, __noswap_vget_lane_f32(__rev1_694, __p2_694)); \ - __ret_694; \ +#define vmulxs_lane_f32(__p0_590, __p1_590, __p2_590) __extension__ ({ \ + float32_t __ret_590; \ + float32_t __s0_590 = __p0_590; \ + float32x2_t __s1_590 = __p1_590; \ + float32x2_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \ + __ret_590 = vmulxs_f32(__s0_590, __noswap_vget_lane_f32(__rev1_590, __p2_590)); \ + __ret_590; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f64(__p0_695, __p1_695, __p2_695) __extension__ ({ \ - float64x2_t __ret_695; \ - 
float64x2_t __s0_695 = __p0_695; \ - float64x1_t __s1_695 = __p1_695; \ - __ret_695 = vmulxq_f64(__s0_695, splatq_lane_f64(__s1_695, __p2_695)); \ - __ret_695; \ +#define vmulxq_lane_f64(__p0_591, __p1_591, __p2_591) __extension__ ({ \ + float64x2_t __ret_591; \ + float64x2_t __s0_591 = __p0_591; \ + float64x1_t __s1_591 = __p1_591; \ + __ret_591 = vmulxq_f64(__s0_591, splatq_lane_f64(__s1_591, __p2_591)); \ + __ret_591; \ }) #else -#define vmulxq_lane_f64(__p0_696, __p1_696, __p2_696) __extension__ ({ \ - float64x2_t __ret_696; \ - float64x2_t __s0_696 = __p0_696; \ - float64x1_t __s1_696 = __p1_696; \ - float64x2_t __rev0_696; __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \ - __ret_696 = __noswap_vmulxq_f64(__rev0_696, __noswap_splatq_lane_f64(__s1_696, __p2_696)); \ - __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 1, 0); \ - __ret_696; \ +#define vmulxq_lane_f64(__p0_592, __p1_592, __p2_592) __extension__ ({ \ + float64x2_t __ret_592; \ + float64x2_t __s0_592 = __p0_592; \ + float64x1_t __s1_592 = __p1_592; \ + float64x2_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \ + __ret_592 = __noswap_vmulxq_f64(__rev0_592, __noswap_splatq_lane_f64(__s1_592, __p2_592)); \ + __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \ + __ret_592; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f32(__p0_697, __p1_697, __p2_697) __extension__ ({ \ - float32x4_t __ret_697; \ - float32x4_t __s0_697 = __p0_697; \ - float32x2_t __s1_697 = __p1_697; \ - __ret_697 = vmulxq_f32(__s0_697, splatq_lane_f32(__s1_697, __p2_697)); \ - __ret_697; \ +#define vmulxq_lane_f32(__p0_593, __p1_593, __p2_593) __extension__ ({ \ + float32x4_t __ret_593; \ + float32x4_t __s0_593 = __p0_593; \ + float32x2_t __s1_593 = __p1_593; \ + __ret_593 = vmulxq_f32(__s0_593, splatq_lane_f32(__s1_593, __p2_593)); \ + __ret_593; \ }) #else -#define vmulxq_lane_f32(__p0_698, __p1_698, __p2_698) __extension__ ({ \ - float32x4_t __ret_698; \ - float32x4_t __s0_698 = __p0_698; \ - float32x2_t __s1_698 = __p1_698; \ - float32x4_t __rev0_698; __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \ - float32x2_t __rev1_698; __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 1, 0); \ - __ret_698 = __noswap_vmulxq_f32(__rev0_698, __noswap_splatq_lane_f32(__rev1_698, __p2_698)); \ - __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 3, 2, 1, 0); \ - __ret_698; \ +#define vmulxq_lane_f32(__p0_594, __p1_594, __p2_594) __extension__ ({ \ + float32x4_t __ret_594; \ + float32x4_t __s0_594 = __p0_594; \ + float32x2_t __s1_594 = __p1_594; \ + float32x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ + float32x2_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 1, 0); \ + __ret_594 = __noswap_vmulxq_f32(__rev0_594, __noswap_splatq_lane_f32(__rev1_594, __p2_594)); \ + __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \ + __ret_594; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulx_lane_f32(__p0_699, __p1_699, __p2_699) __extension__ ({ \ - float32x2_t __ret_699; \ - float32x2_t __s0_699 = __p0_699; \ - float32x2_t __s1_699 = __p1_699; \ - __ret_699 = vmulx_f32(__s0_699, splat_lane_f32(__s1_699, __p2_699)); \ - __ret_699; \ +#define vmulx_lane_f32(__p0_595, __p1_595, __p2_595) __extension__ ({ \ + float32x2_t __ret_595; \ + float32x2_t __s0_595 = __p0_595; \ + float32x2_t __s1_595 = __p1_595; \ + __ret_595 = vmulx_f32(__s0_595, splat_lane_f32(__s1_595, __p2_595)); \ + 
__ret_595; \ }) #else -#define vmulx_lane_f32(__p0_700, __p1_700, __p2_700) __extension__ ({ \ - float32x2_t __ret_700; \ - float32x2_t __s0_700 = __p0_700; \ - float32x2_t __s1_700 = __p1_700; \ - float32x2_t __rev0_700; __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \ - float32x2_t __rev1_700; __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 1, 0); \ - __ret_700 = __noswap_vmulx_f32(__rev0_700, __noswap_splat_lane_f32(__rev1_700, __p2_700)); \ - __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 1, 0); \ - __ret_700; \ +#define vmulx_lane_f32(__p0_596, __p1_596, __p2_596) __extension__ ({ \ + float32x2_t __ret_596; \ + float32x2_t __s0_596 = __p0_596; \ + float32x2_t __s1_596 = __p1_596; \ + float32x2_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \ + float32x2_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \ + __ret_596 = __noswap_vmulx_f32(__rev0_596, __noswap_splat_lane_f32(__rev1_596, __p2_596)); \ + __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \ + __ret_596; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxd_laneq_f64(__p0_701, __p1_701, __p2_701) __extension__ ({ \ - float64_t __ret_701; \ - float64_t __s0_701 = __p0_701; \ - float64x2_t __s1_701 = __p1_701; \ - __ret_701 = vmulxd_f64(__s0_701, vgetq_lane_f64(__s1_701, __p2_701)); \ - __ret_701; \ +#define vmulxd_laneq_f64(__p0_597, __p1_597, __p2_597) __extension__ ({ \ + float64_t __ret_597; \ + float64_t __s0_597 = __p0_597; \ + float64x2_t __s1_597 = __p1_597; \ + __ret_597 = vmulxd_f64(__s0_597, vgetq_lane_f64(__s1_597, __p2_597)); \ + __ret_597; \ }) #else -#define vmulxd_laneq_f64(__p0_702, __p1_702, __p2_702) __extension__ ({ \ - float64_t __ret_702; \ - float64_t __s0_702 = __p0_702; \ - float64x2_t __s1_702 = __p1_702; \ - float64x2_t __rev1_702; __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 1, 0); \ - __ret_702 = vmulxd_f64(__s0_702, __noswap_vgetq_lane_f64(__rev1_702, __p2_702)); \ - __ret_702; \ +#define vmulxd_laneq_f64(__p0_598, __p1_598, __p2_598) __extension__ ({ \ + float64_t __ret_598; \ + float64_t __s0_598 = __p0_598; \ + float64x2_t __s1_598 = __p1_598; \ + float64x2_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 1, 0); \ + __ret_598 = vmulxd_f64(__s0_598, __noswap_vgetq_lane_f64(__rev1_598, __p2_598)); \ + __ret_598; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxs_laneq_f32(__p0_703, __p1_703, __p2_703) __extension__ ({ \ - float32_t __ret_703; \ - float32_t __s0_703 = __p0_703; \ - float32x4_t __s1_703 = __p1_703; \ - __ret_703 = vmulxs_f32(__s0_703, vgetq_lane_f32(__s1_703, __p2_703)); \ - __ret_703; \ +#define vmulxs_laneq_f32(__p0_599, __p1_599, __p2_599) __extension__ ({ \ + float32_t __ret_599; \ + float32_t __s0_599 = __p0_599; \ + float32x4_t __s1_599 = __p1_599; \ + __ret_599 = vmulxs_f32(__s0_599, vgetq_lane_f32(__s1_599, __p2_599)); \ + __ret_599; \ }) #else -#define vmulxs_laneq_f32(__p0_704, __p1_704, __p2_704) __extension__ ({ \ - float32_t __ret_704; \ - float32_t __s0_704 = __p0_704; \ - float32x4_t __s1_704 = __p1_704; \ - float32x4_t __rev1_704; __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \ - __ret_704 = vmulxs_f32(__s0_704, __noswap_vgetq_lane_f32(__rev1_704, __p2_704)); \ - __ret_704; \ +#define vmulxs_laneq_f32(__p0_600, __p1_600, __p2_600) __extension__ ({ \ + float32_t __ret_600; \ + float32_t __s0_600 = __p0_600; \ + float32x4_t __s1_600 = __p1_600; \ + float32x4_t __rev1_600; __rev1_600 = 
__builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \ + __ret_600 = vmulxs_f32(__s0_600, __noswap_vgetq_lane_f32(__rev1_600, __p2_600)); \ + __ret_600; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f64(__p0_705, __p1_705, __p2_705) __extension__ ({ \ - float64x2_t __ret_705; \ - float64x2_t __s0_705 = __p0_705; \ - float64x2_t __s1_705 = __p1_705; \ - __ret_705 = vmulxq_f64(__s0_705, splatq_laneq_f64(__s1_705, __p2_705)); \ - __ret_705; \ +#define vmulxq_laneq_f64(__p0_601, __p1_601, __p2_601) __extension__ ({ \ + float64x2_t __ret_601; \ + float64x2_t __s0_601 = __p0_601; \ + float64x2_t __s1_601 = __p1_601; \ + __ret_601 = vmulxq_f64(__s0_601, splatq_laneq_f64(__s1_601, __p2_601)); \ + __ret_601; \ }) #else -#define vmulxq_laneq_f64(__p0_706, __p1_706, __p2_706) __extension__ ({ \ - float64x2_t __ret_706; \ - float64x2_t __s0_706 = __p0_706; \ - float64x2_t __s1_706 = __p1_706; \ - float64x2_t __rev0_706; __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \ - float64x2_t __rev1_706; __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 1, 0); \ - __ret_706 = __noswap_vmulxq_f64(__rev0_706, __noswap_splatq_laneq_f64(__rev1_706, __p2_706)); \ - __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 1, 0); \ - __ret_706; \ +#define vmulxq_laneq_f64(__p0_602, __p1_602, __p2_602) __extension__ ({ \ + float64x2_t __ret_602; \ + float64x2_t __s0_602 = __p0_602; \ + float64x2_t __s1_602 = __p1_602; \ + float64x2_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \ + float64x2_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 1, 0); \ + __ret_602 = __noswap_vmulxq_f64(__rev0_602, __noswap_splatq_laneq_f64(__rev1_602, __p2_602)); \ + __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 1, 0); \ + __ret_602; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f32(__p0_707, __p1_707, __p2_707) __extension__ ({ \ - float32x4_t __ret_707; \ - float32x4_t __s0_707 = __p0_707; \ - float32x4_t __s1_707 = __p1_707; \ - __ret_707 = vmulxq_f32(__s0_707, splatq_laneq_f32(__s1_707, __p2_707)); \ - __ret_707; \ +#define vmulxq_laneq_f32(__p0_603, __p1_603, __p2_603) __extension__ ({ \ + float32x4_t __ret_603; \ + float32x4_t __s0_603 = __p0_603; \ + float32x4_t __s1_603 = __p1_603; \ + __ret_603 = vmulxq_f32(__s0_603, splatq_laneq_f32(__s1_603, __p2_603)); \ + __ret_603; \ }) #else -#define vmulxq_laneq_f32(__p0_708, __p1_708, __p2_708) __extension__ ({ \ - float32x4_t __ret_708; \ - float32x4_t __s0_708 = __p0_708; \ - float32x4_t __s1_708 = __p1_708; \ - float32x4_t __rev0_708; __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 3, 2, 1, 0); \ - float32x4_t __rev1_708; __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 3, 2, 1, 0); \ - __ret_708 = __noswap_vmulxq_f32(__rev0_708, __noswap_splatq_laneq_f32(__rev1_708, __p2_708)); \ - __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 3, 2, 1, 0); \ - __ret_708; \ +#define vmulxq_laneq_f32(__p0_604, __p1_604, __p2_604) __extension__ ({ \ + float32x4_t __ret_604; \ + float32x4_t __s0_604 = __p0_604; \ + float32x4_t __s1_604 = __p1_604; \ + float32x4_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 3, 2, 1, 0); \ + float32x4_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \ + __ret_604 = __noswap_vmulxq_f32(__rev0_604, __noswap_splatq_laneq_f32(__rev1_604, __p2_604)); \ + __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 3, 2, 1, 0); \ + __ret_604; \ }) #endif #ifdef __LITTLE_ENDIAN__ 
-#define vmulx_laneq_f32(__p0_709, __p1_709, __p2_709) __extension__ ({ \ - float32x2_t __ret_709; \ - float32x2_t __s0_709 = __p0_709; \ - float32x4_t __s1_709 = __p1_709; \ - __ret_709 = vmulx_f32(__s0_709, splat_laneq_f32(__s1_709, __p2_709)); \ - __ret_709; \ +#define vmulx_laneq_f32(__p0_605, __p1_605, __p2_605) __extension__ ({ \ + float32x2_t __ret_605; \ + float32x2_t __s0_605 = __p0_605; \ + float32x4_t __s1_605 = __p1_605; \ + __ret_605 = vmulx_f32(__s0_605, splat_laneq_f32(__s1_605, __p2_605)); \ + __ret_605; \ }) #else -#define vmulx_laneq_f32(__p0_710, __p1_710, __p2_710) __extension__ ({ \ - float32x2_t __ret_710; \ - float32x2_t __s0_710 = __p0_710; \ - float32x4_t __s1_710 = __p1_710; \ - float32x2_t __rev0_710; __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 1, 0); \ - float32x4_t __rev1_710; __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 3, 2, 1, 0); \ - __ret_710 = __noswap_vmulx_f32(__rev0_710, __noswap_splat_laneq_f32(__rev1_710, __p2_710)); \ - __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 1, 0); \ - __ret_710; \ +#define vmulx_laneq_f32(__p0_606, __p1_606, __p2_606) __extension__ ({ \ + float32x2_t __ret_606; \ + float32x2_t __s0_606 = __p0_606; \ + float32x4_t __s1_606 = __p1_606; \ + float32x2_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 1, 0); \ + float32x4_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 3, 2, 1, 0); \ + __ret_606 = __noswap_vmulx_f32(__rev0_606, __noswap_splat_laneq_f32(__rev1_606, __p2_606)); \ + __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 1, 0); \ + __ret_606; \ }) #endif @@ -58725,98 +52797,98 @@ __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_lane_s32(__p0_711, __p1_711, __p2_711, __p3_711) __extension__ ({ \ - int64x2_t __ret_711; \ - int64x2_t __s0_711 = __p0_711; \ - int32x4_t __s1_711 = __p1_711; \ - int32x2_t __s2_711 = __p2_711; \ - __ret_711 = vqdmlal_s32(__s0_711, vget_high_s32(__s1_711), splat_lane_s32(__s2_711, __p3_711)); \ - __ret_711; \ +#define vqdmlal_high_lane_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \ + int64x2_t __ret_607; \ + int64x2_t __s0_607 = __p0_607; \ + int32x4_t __s1_607 = __p1_607; \ + int32x2_t __s2_607 = __p2_607; \ + __ret_607 = vqdmlal_s32(__s0_607, vget_high_s32(__s1_607), splat_lane_s32(__s2_607, __p3_607)); \ + __ret_607; \ }) #else -#define vqdmlal_high_lane_s32(__p0_712, __p1_712, __p2_712, __p3_712) __extension__ ({ \ - int64x2_t __ret_712; \ - int64x2_t __s0_712 = __p0_712; \ - int32x4_t __s1_712 = __p1_712; \ - int32x2_t __s2_712 = __p2_712; \ - int64x2_t __rev0_712; __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \ - int32x4_t __rev1_712; __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 3, 2, 1, 0); \ - int32x2_t __rev2_712; __rev2_712 = __builtin_shufflevector(__s2_712, __s2_712, 1, 0); \ - __ret_712 = __noswap_vqdmlal_s32(__rev0_712, __noswap_vget_high_s32(__rev1_712), __noswap_splat_lane_s32(__rev2_712, __p3_712)); \ - __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 1, 0); \ - __ret_712; \ +#define vqdmlal_high_lane_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \ + int64x2_t __ret_608; \ + int64x2_t __s0_608 = __p0_608; \ + int32x4_t __s1_608 = __p1_608; \ + int32x2_t __s2_608 = __p2_608; \ + int64x2_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \ + int32x4_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, 
__s1_608, 3, 2, 1, 0); \ + int32x2_t __rev2_608; __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 1, 0); \ + __ret_608 = __noswap_vqdmlal_s32(__rev0_608, __noswap_vget_high_s32(__rev1_608), __noswap_splat_lane_s32(__rev2_608, __p3_608)); \ + __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \ + __ret_608; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_lane_s16(__p0_713, __p1_713, __p2_713, __p3_713) __extension__ ({ \ - int32x4_t __ret_713; \ - int32x4_t __s0_713 = __p0_713; \ - int16x8_t __s1_713 = __p1_713; \ - int16x4_t __s2_713 = __p2_713; \ - __ret_713 = vqdmlal_s16(__s0_713, vget_high_s16(__s1_713), splat_lane_s16(__s2_713, __p3_713)); \ - __ret_713; \ +#define vqdmlal_high_lane_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \ + int32x4_t __ret_609; \ + int32x4_t __s0_609 = __p0_609; \ + int16x8_t __s1_609 = __p1_609; \ + int16x4_t __s2_609 = __p2_609; \ + __ret_609 = vqdmlal_s16(__s0_609, vget_high_s16(__s1_609), splat_lane_s16(__s2_609, __p3_609)); \ + __ret_609; \ }) #else -#define vqdmlal_high_lane_s16(__p0_714, __p1_714, __p2_714, __p3_714) __extension__ ({ \ - int32x4_t __ret_714; \ - int32x4_t __s0_714 = __p0_714; \ - int16x8_t __s1_714 = __p1_714; \ - int16x4_t __s2_714 = __p2_714; \ - int32x4_t __rev0_714; __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \ - int16x8_t __rev1_714; __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_714; __rev2_714 = __builtin_shufflevector(__s2_714, __s2_714, 3, 2, 1, 0); \ - __ret_714 = __noswap_vqdmlal_s16(__rev0_714, __noswap_vget_high_s16(__rev1_714), __noswap_splat_lane_s16(__rev2_714, __p3_714)); \ - __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 3, 2, 1, 0); \ - __ret_714; \ +#define vqdmlal_high_lane_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \ + int32x4_t __ret_610; \ + int32x4_t __s0_610 = __p0_610; \ + int16x8_t __s1_610 = __p1_610; \ + int16x4_t __s2_610 = __p2_610; \ + int32x4_t __rev0_610; __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \ + int16x8_t __rev1_610; __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_610; __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 3, 2, 1, 0); \ + __ret_610 = __noswap_vqdmlal_s16(__rev0_610, __noswap_vget_high_s16(__rev1_610), __noswap_splat_lane_s16(__rev2_610, __p3_610)); \ + __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \ + __ret_610; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_laneq_s32(__p0_715, __p1_715, __p2_715, __p3_715) __extension__ ({ \ - int64x2_t __ret_715; \ - int64x2_t __s0_715 = __p0_715; \ - int32x4_t __s1_715 = __p1_715; \ - int32x4_t __s2_715 = __p2_715; \ - __ret_715 = vqdmlal_s32(__s0_715, vget_high_s32(__s1_715), splat_laneq_s32(__s2_715, __p3_715)); \ - __ret_715; \ +#define vqdmlal_high_laneq_s32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \ + int64x2_t __ret_611; \ + int64x2_t __s0_611 = __p0_611; \ + int32x4_t __s1_611 = __p1_611; \ + int32x4_t __s2_611 = __p2_611; \ + __ret_611 = vqdmlal_s32(__s0_611, vget_high_s32(__s1_611), splat_laneq_s32(__s2_611, __p3_611)); \ + __ret_611; \ }) #else -#define vqdmlal_high_laneq_s32(__p0_716, __p1_716, __p2_716, __p3_716) __extension__ ({ \ - int64x2_t __ret_716; \ - int64x2_t __s0_716 = __p0_716; \ - int32x4_t __s1_716 = __p1_716; \ - int32x4_t __s2_716 = __p2_716; \ - int64x2_t __rev0_716; __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 1, 
0); \ - int32x4_t __rev1_716; __rev1_716 = __builtin_shufflevector(__s1_716, __s1_716, 3, 2, 1, 0); \ - int32x4_t __rev2_716; __rev2_716 = __builtin_shufflevector(__s2_716, __s2_716, 3, 2, 1, 0); \ - __ret_716 = __noswap_vqdmlal_s32(__rev0_716, __noswap_vget_high_s32(__rev1_716), __noswap_splat_laneq_s32(__rev2_716, __p3_716)); \ - __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 1, 0); \ - __ret_716; \ +#define vqdmlal_high_laneq_s32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \ + int64x2_t __ret_612; \ + int64x2_t __s0_612 = __p0_612; \ + int32x4_t __s1_612 = __p1_612; \ + int32x4_t __s2_612 = __p2_612; \ + int64x2_t __rev0_612; __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \ + int32x4_t __rev1_612; __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 3, 2, 1, 0); \ + int32x4_t __rev2_612; __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \ + __ret_612 = __noswap_vqdmlal_s32(__rev0_612, __noswap_vget_high_s32(__rev1_612), __noswap_splat_laneq_s32(__rev2_612, __p3_612)); \ + __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \ + __ret_612; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_laneq_s16(__p0_717, __p1_717, __p2_717, __p3_717) __extension__ ({ \ - int32x4_t __ret_717; \ - int32x4_t __s0_717 = __p0_717; \ - int16x8_t __s1_717 = __p1_717; \ - int16x8_t __s2_717 = __p2_717; \ - __ret_717 = vqdmlal_s16(__s0_717, vget_high_s16(__s1_717), splat_laneq_s16(__s2_717, __p3_717)); \ - __ret_717; \ +#define vqdmlal_high_laneq_s16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \ + int32x4_t __ret_613; \ + int32x4_t __s0_613 = __p0_613; \ + int16x8_t __s1_613 = __p1_613; \ + int16x8_t __s2_613 = __p2_613; \ + __ret_613 = vqdmlal_s16(__s0_613, vget_high_s16(__s1_613), splat_laneq_s16(__s2_613, __p3_613)); \ + __ret_613; \ }) #else -#define vqdmlal_high_laneq_s16(__p0_718, __p1_718, __p2_718, __p3_718) __extension__ ({ \ - int32x4_t __ret_718; \ - int32x4_t __s0_718 = __p0_718; \ - int16x8_t __s1_718 = __p1_718; \ - int16x8_t __s2_718 = __p2_718; \ - int32x4_t __rev0_718; __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \ - int16x8_t __rev1_718; __rev1_718 = __builtin_shufflevector(__s1_718, __s1_718, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_718; __rev2_718 = __builtin_shufflevector(__s2_718, __s2_718, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_718 = __noswap_vqdmlal_s16(__rev0_718, __noswap_vget_high_s16(__rev1_718), __noswap_splat_laneq_s16(__rev2_718, __p3_718)); \ - __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 3, 2, 1, 0); \ - __ret_718; \ +#define vqdmlal_high_laneq_s16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \ + int32x4_t __ret_614; \ + int32x4_t __s0_614 = __p0_614; \ + int16x8_t __s1_614 = __p1_614; \ + int16x8_t __s2_614 = __p2_614; \ + int32x4_t __rev0_614; __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \ + int16x8_t __rev1_614; __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_614; __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_614 = __noswap_vqdmlal_s16(__rev0_614, __noswap_vget_high_s16(__rev1_614), __noswap_splat_laneq_s16(__rev2_614, __p3_614)); \ + __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \ + __ret_614; \ }) #endif @@ -58939,50 +53011,50 @@ __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_laneq_s32(__p0_719, 
__p1_719, __p2_719, __p3_719) __extension__ ({ \ - int64x2_t __ret_719; \ - int64x2_t __s0_719 = __p0_719; \ - int32x2_t __s1_719 = __p1_719; \ - int32x4_t __s2_719 = __p2_719; \ - __ret_719 = vqdmlal_s32(__s0_719, __s1_719, splat_laneq_s32(__s2_719, __p3_719)); \ - __ret_719; \ +#define vqdmlal_laneq_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \ + int64x2_t __ret_615; \ + int64x2_t __s0_615 = __p0_615; \ + int32x2_t __s1_615 = __p1_615; \ + int32x4_t __s2_615 = __p2_615; \ + __ret_615 = vqdmlal_s32(__s0_615, __s1_615, splat_laneq_s32(__s2_615, __p3_615)); \ + __ret_615; \ }) #else -#define vqdmlal_laneq_s32(__p0_720, __p1_720, __p2_720, __p3_720) __extension__ ({ \ - int64x2_t __ret_720; \ - int64x2_t __s0_720 = __p0_720; \ - int32x2_t __s1_720 = __p1_720; \ - int32x4_t __s2_720 = __p2_720; \ - int64x2_t __rev0_720; __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 1, 0); \ - int32x2_t __rev1_720; __rev1_720 = __builtin_shufflevector(__s1_720, __s1_720, 1, 0); \ - int32x4_t __rev2_720; __rev2_720 = __builtin_shufflevector(__s2_720, __s2_720, 3, 2, 1, 0); \ - __ret_720 = __noswap_vqdmlal_s32(__rev0_720, __rev1_720, __noswap_splat_laneq_s32(__rev2_720, __p3_720)); \ - __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 1, 0); \ - __ret_720; \ +#define vqdmlal_laneq_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \ + int64x2_t __ret_616; \ + int64x2_t __s0_616 = __p0_616; \ + int32x2_t __s1_616 = __p1_616; \ + int32x4_t __s2_616 = __p2_616; \ + int64x2_t __rev0_616; __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \ + int32x2_t __rev1_616; __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 1, 0); \ + int32x4_t __rev2_616; __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 3, 2, 1, 0); \ + __ret_616 = __noswap_vqdmlal_s32(__rev0_616, __rev1_616, __noswap_splat_laneq_s32(__rev2_616, __p3_616)); \ + __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \ + __ret_616; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_laneq_s16(__p0_721, __p1_721, __p2_721, __p3_721) __extension__ ({ \ - int32x4_t __ret_721; \ - int32x4_t __s0_721 = __p0_721; \ - int16x4_t __s1_721 = __p1_721; \ - int16x8_t __s2_721 = __p2_721; \ - __ret_721 = vqdmlal_s16(__s0_721, __s1_721, splat_laneq_s16(__s2_721, __p3_721)); \ - __ret_721; \ +#define vqdmlal_laneq_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \ + int32x4_t __ret_617; \ + int32x4_t __s0_617 = __p0_617; \ + int16x4_t __s1_617 = __p1_617; \ + int16x8_t __s2_617 = __p2_617; \ + __ret_617 = vqdmlal_s16(__s0_617, __s1_617, splat_laneq_s16(__s2_617, __p3_617)); \ + __ret_617; \ }) #else -#define vqdmlal_laneq_s16(__p0_722, __p1_722, __p2_722, __p3_722) __extension__ ({ \ - int32x4_t __ret_722; \ - int32x4_t __s0_722 = __p0_722; \ - int16x4_t __s1_722 = __p1_722; \ - int16x8_t __s2_722 = __p2_722; \ - int32x4_t __rev0_722; __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 3, 2, 1, 0); \ - int16x4_t __rev1_722; __rev1_722 = __builtin_shufflevector(__s1_722, __s1_722, 3, 2, 1, 0); \ - int16x8_t __rev2_722; __rev2_722 = __builtin_shufflevector(__s2_722, __s2_722, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_722 = __noswap_vqdmlal_s16(__rev0_722, __rev1_722, __noswap_splat_laneq_s16(__rev2_722, __p3_722)); \ - __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 3, 2, 1, 0); \ - __ret_722; \ +#define vqdmlal_laneq_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \ + int32x4_t __ret_618; \ + int32x4_t __s0_618 = __p0_618; \ + int16x4_t __s1_618 = __p1_618; \ + 
int16x8_t __s2_618 = __p2_618; \ + int32x4_t __rev0_618; __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \ + int16x4_t __rev1_618; __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 3, 2, 1, 0); \ + int16x8_t __rev2_618; __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_618 = __noswap_vqdmlal_s16(__rev0_618, __rev1_618, __noswap_splat_laneq_s16(__rev2_618, __p3_618)); \ + __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \ + __ret_618; \ }) #endif @@ -59033,98 +53105,98 @@ __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_lane_s32(__p0_723, __p1_723, __p2_723, __p3_723) __extension__ ({ \ - int64x2_t __ret_723; \ - int64x2_t __s0_723 = __p0_723; \ - int32x4_t __s1_723 = __p1_723; \ - int32x2_t __s2_723 = __p2_723; \ - __ret_723 = vqdmlsl_s32(__s0_723, vget_high_s32(__s1_723), splat_lane_s32(__s2_723, __p3_723)); \ - __ret_723; \ +#define vqdmlsl_high_lane_s32(__p0_619, __p1_619, __p2_619, __p3_619) __extension__ ({ \ + int64x2_t __ret_619; \ + int64x2_t __s0_619 = __p0_619; \ + int32x4_t __s1_619 = __p1_619; \ + int32x2_t __s2_619 = __p2_619; \ + __ret_619 = vqdmlsl_s32(__s0_619, vget_high_s32(__s1_619), splat_lane_s32(__s2_619, __p3_619)); \ + __ret_619; \ }) #else -#define vqdmlsl_high_lane_s32(__p0_724, __p1_724, __p2_724, __p3_724) __extension__ ({ \ - int64x2_t __ret_724; \ - int64x2_t __s0_724 = __p0_724; \ - int32x4_t __s1_724 = __p1_724; \ - int32x2_t __s2_724 = __p2_724; \ - int64x2_t __rev0_724; __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 1, 0); \ - int32x4_t __rev1_724; __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \ - int32x2_t __rev2_724; __rev2_724 = __builtin_shufflevector(__s2_724, __s2_724, 1, 0); \ - __ret_724 = __noswap_vqdmlsl_s32(__rev0_724, __noswap_vget_high_s32(__rev1_724), __noswap_splat_lane_s32(__rev2_724, __p3_724)); \ - __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \ - __ret_724; \ +#define vqdmlsl_high_lane_s32(__p0_620, __p1_620, __p2_620, __p3_620) __extension__ ({ \ + int64x2_t __ret_620; \ + int64x2_t __s0_620 = __p0_620; \ + int32x4_t __s1_620 = __p1_620; \ + int32x2_t __s2_620 = __p2_620; \ + int64x2_t __rev0_620; __rev0_620 = __builtin_shufflevector(__s0_620, __s0_620, 1, 0); \ + int32x4_t __rev1_620; __rev1_620 = __builtin_shufflevector(__s1_620, __s1_620, 3, 2, 1, 0); \ + int32x2_t __rev2_620; __rev2_620 = __builtin_shufflevector(__s2_620, __s2_620, 1, 0); \ + __ret_620 = __noswap_vqdmlsl_s32(__rev0_620, __noswap_vget_high_s32(__rev1_620), __noswap_splat_lane_s32(__rev2_620, __p3_620)); \ + __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 1, 0); \ + __ret_620; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_lane_s16(__p0_725, __p1_725, __p2_725, __p3_725) __extension__ ({ \ - int32x4_t __ret_725; \ - int32x4_t __s0_725 = __p0_725; \ - int16x8_t __s1_725 = __p1_725; \ - int16x4_t __s2_725 = __p2_725; \ - __ret_725 = vqdmlsl_s16(__s0_725, vget_high_s16(__s1_725), splat_lane_s16(__s2_725, __p3_725)); \ - __ret_725; \ +#define vqdmlsl_high_lane_s16(__p0_621, __p1_621, __p2_621, __p3_621) __extension__ ({ \ + int32x4_t __ret_621; \ + int32x4_t __s0_621 = __p0_621; \ + int16x8_t __s1_621 = __p1_621; \ + int16x4_t __s2_621 = __p2_621; \ + __ret_621 = vqdmlsl_s16(__s0_621, vget_high_s16(__s1_621), splat_lane_s16(__s2_621, __p3_621)); \ + __ret_621; \ }) #else -#define vqdmlsl_high_lane_s16(__p0_726, __p1_726, 
__p2_726, __p3_726) __extension__ ({ \ - int32x4_t __ret_726; \ - int32x4_t __s0_726 = __p0_726; \ - int16x8_t __s1_726 = __p1_726; \ - int16x4_t __s2_726 = __p2_726; \ - int32x4_t __rev0_726; __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 3, 2, 1, 0); \ - int16x8_t __rev1_726; __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_726; __rev2_726 = __builtin_shufflevector(__s2_726, __s2_726, 3, 2, 1, 0); \ - __ret_726 = __noswap_vqdmlsl_s16(__rev0_726, __noswap_vget_high_s16(__rev1_726), __noswap_splat_lane_s16(__rev2_726, __p3_726)); \ - __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \ - __ret_726; \ +#define vqdmlsl_high_lane_s16(__p0_622, __p1_622, __p2_622, __p3_622) __extension__ ({ \ + int32x4_t __ret_622; \ + int32x4_t __s0_622 = __p0_622; \ + int16x8_t __s1_622 = __p1_622; \ + int16x4_t __s2_622 = __p2_622; \ + int32x4_t __rev0_622; __rev0_622 = __builtin_shufflevector(__s0_622, __s0_622, 3, 2, 1, 0); \ + int16x8_t __rev1_622; __rev1_622 = __builtin_shufflevector(__s1_622, __s1_622, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_622; __rev2_622 = __builtin_shufflevector(__s2_622, __s2_622, 3, 2, 1, 0); \ + __ret_622 = __noswap_vqdmlsl_s16(__rev0_622, __noswap_vget_high_s16(__rev1_622), __noswap_splat_lane_s16(__rev2_622, __p3_622)); \ + __ret_622 = __builtin_shufflevector(__ret_622, __ret_622, 3, 2, 1, 0); \ + __ret_622; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_laneq_s32(__p0_727, __p1_727, __p2_727, __p3_727) __extension__ ({ \ - int64x2_t __ret_727; \ - int64x2_t __s0_727 = __p0_727; \ - int32x4_t __s1_727 = __p1_727; \ - int32x4_t __s2_727 = __p2_727; \ - __ret_727 = vqdmlsl_s32(__s0_727, vget_high_s32(__s1_727), splat_laneq_s32(__s2_727, __p3_727)); \ - __ret_727; \ +#define vqdmlsl_high_laneq_s32(__p0_623, __p1_623, __p2_623, __p3_623) __extension__ ({ \ + int64x2_t __ret_623; \ + int64x2_t __s0_623 = __p0_623; \ + int32x4_t __s1_623 = __p1_623; \ + int32x4_t __s2_623 = __p2_623; \ + __ret_623 = vqdmlsl_s32(__s0_623, vget_high_s32(__s1_623), splat_laneq_s32(__s2_623, __p3_623)); \ + __ret_623; \ }) #else -#define vqdmlsl_high_laneq_s32(__p0_728, __p1_728, __p2_728, __p3_728) __extension__ ({ \ - int64x2_t __ret_728; \ - int64x2_t __s0_728 = __p0_728; \ - int32x4_t __s1_728 = __p1_728; \ - int32x4_t __s2_728 = __p2_728; \ - int64x2_t __rev0_728; __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 1, 0); \ - int32x4_t __rev1_728; __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 3, 2, 1, 0); \ - int32x4_t __rev2_728; __rev2_728 = __builtin_shufflevector(__s2_728, __s2_728, 3, 2, 1, 0); \ - __ret_728 = __noswap_vqdmlsl_s32(__rev0_728, __noswap_vget_high_s32(__rev1_728), __noswap_splat_laneq_s32(__rev2_728, __p3_728)); \ - __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 1, 0); \ - __ret_728; \ +#define vqdmlsl_high_laneq_s32(__p0_624, __p1_624, __p2_624, __p3_624) __extension__ ({ \ + int64x2_t __ret_624; \ + int64x2_t __s0_624 = __p0_624; \ + int32x4_t __s1_624 = __p1_624; \ + int32x4_t __s2_624 = __p2_624; \ + int64x2_t __rev0_624; __rev0_624 = __builtin_shufflevector(__s0_624, __s0_624, 1, 0); \ + int32x4_t __rev1_624; __rev1_624 = __builtin_shufflevector(__s1_624, __s1_624, 3, 2, 1, 0); \ + int32x4_t __rev2_624; __rev2_624 = __builtin_shufflevector(__s2_624, __s2_624, 3, 2, 1, 0); \ + __ret_624 = __noswap_vqdmlsl_s32(__rev0_624, __noswap_vget_high_s32(__rev1_624), __noswap_splat_laneq_s32(__rev2_624, __p3_624)); \ + __ret_624 = 
__builtin_shufflevector(__ret_624, __ret_624, 1, 0); \ + __ret_624; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_laneq_s16(__p0_729, __p1_729, __p2_729, __p3_729) __extension__ ({ \ - int32x4_t __ret_729; \ - int32x4_t __s0_729 = __p0_729; \ - int16x8_t __s1_729 = __p1_729; \ - int16x8_t __s2_729 = __p2_729; \ - __ret_729 = vqdmlsl_s16(__s0_729, vget_high_s16(__s1_729), splat_laneq_s16(__s2_729, __p3_729)); \ - __ret_729; \ +#define vqdmlsl_high_laneq_s16(__p0_625, __p1_625, __p2_625, __p3_625) __extension__ ({ \ + int32x4_t __ret_625; \ + int32x4_t __s0_625 = __p0_625; \ + int16x8_t __s1_625 = __p1_625; \ + int16x8_t __s2_625 = __p2_625; \ + __ret_625 = vqdmlsl_s16(__s0_625, vget_high_s16(__s1_625), splat_laneq_s16(__s2_625, __p3_625)); \ + __ret_625; \ }) #else -#define vqdmlsl_high_laneq_s16(__p0_730, __p1_730, __p2_730, __p3_730) __extension__ ({ \ - int32x4_t __ret_730; \ - int32x4_t __s0_730 = __p0_730; \ - int16x8_t __s1_730 = __p1_730; \ - int16x8_t __s2_730 = __p2_730; \ - int32x4_t __rev0_730; __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 3, 2, 1, 0); \ - int16x8_t __rev1_730; __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_730; __rev2_730 = __builtin_shufflevector(__s2_730, __s2_730, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_730 = __noswap_vqdmlsl_s16(__rev0_730, __noswap_vget_high_s16(__rev1_730), __noswap_splat_laneq_s16(__rev2_730, __p3_730)); \ - __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \ - __ret_730; \ +#define vqdmlsl_high_laneq_s16(__p0_626, __p1_626, __p2_626, __p3_626) __extension__ ({ \ + int32x4_t __ret_626; \ + int32x4_t __s0_626 = __p0_626; \ + int16x8_t __s1_626 = __p1_626; \ + int16x8_t __s2_626 = __p2_626; \ + int32x4_t __rev0_626; __rev0_626 = __builtin_shufflevector(__s0_626, __s0_626, 3, 2, 1, 0); \ + int16x8_t __rev1_626; __rev1_626 = __builtin_shufflevector(__s1_626, __s1_626, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_626; __rev2_626 = __builtin_shufflevector(__s2_626, __s2_626, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_626 = __noswap_vqdmlsl_s16(__rev0_626, __noswap_vget_high_s16(__rev1_626), __noswap_splat_laneq_s16(__rev2_626, __p3_626)); \ + __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0); \ + __ret_626; \ }) #endif @@ -59247,50 +53319,50 @@ __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_laneq_s32(__p0_731, __p1_731, __p2_731, __p3_731) __extension__ ({ \ - int64x2_t __ret_731; \ - int64x2_t __s0_731 = __p0_731; \ - int32x2_t __s1_731 = __p1_731; \ - int32x4_t __s2_731 = __p2_731; \ - __ret_731 = vqdmlsl_s32(__s0_731, __s1_731, splat_laneq_s32(__s2_731, __p3_731)); \ - __ret_731; \ +#define vqdmlsl_laneq_s32(__p0_627, __p1_627, __p2_627, __p3_627) __extension__ ({ \ + int64x2_t __ret_627; \ + int64x2_t __s0_627 = __p0_627; \ + int32x2_t __s1_627 = __p1_627; \ + int32x4_t __s2_627 = __p2_627; \ + __ret_627 = vqdmlsl_s32(__s0_627, __s1_627, splat_laneq_s32(__s2_627, __p3_627)); \ + __ret_627; \ }) #else -#define vqdmlsl_laneq_s32(__p0_732, __p1_732, __p2_732, __p3_732) __extension__ ({ \ - int64x2_t __ret_732; \ - int64x2_t __s0_732 = __p0_732; \ - int32x2_t __s1_732 = __p1_732; \ - int32x4_t __s2_732 = __p2_732; \ - int64x2_t __rev0_732; __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 1, 0); \ - int32x2_t __rev1_732; __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 1, 0); \ - int32x4_t __rev2_732; __rev2_732 = 
__builtin_shufflevector(__s2_732, __s2_732, 3, 2, 1, 0); \ - __ret_732 = __noswap_vqdmlsl_s32(__rev0_732, __rev1_732, __noswap_splat_laneq_s32(__rev2_732, __p3_732)); \ - __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 1, 0); \ - __ret_732; \ +#define vqdmlsl_laneq_s32(__p0_628, __p1_628, __p2_628, __p3_628) __extension__ ({ \ + int64x2_t __ret_628; \ + int64x2_t __s0_628 = __p0_628; \ + int32x2_t __s1_628 = __p1_628; \ + int32x4_t __s2_628 = __p2_628; \ + int64x2_t __rev0_628; __rev0_628 = __builtin_shufflevector(__s0_628, __s0_628, 1, 0); \ + int32x2_t __rev1_628; __rev1_628 = __builtin_shufflevector(__s1_628, __s1_628, 1, 0); \ + int32x4_t __rev2_628; __rev2_628 = __builtin_shufflevector(__s2_628, __s2_628, 3, 2, 1, 0); \ + __ret_628 = __noswap_vqdmlsl_s32(__rev0_628, __rev1_628, __noswap_splat_laneq_s32(__rev2_628, __p3_628)); \ + __ret_628 = __builtin_shufflevector(__ret_628, __ret_628, 1, 0); \ + __ret_628; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_laneq_s16(__p0_733, __p1_733, __p2_733, __p3_733) __extension__ ({ \ - int32x4_t __ret_733; \ - int32x4_t __s0_733 = __p0_733; \ - int16x4_t __s1_733 = __p1_733; \ - int16x8_t __s2_733 = __p2_733; \ - __ret_733 = vqdmlsl_s16(__s0_733, __s1_733, splat_laneq_s16(__s2_733, __p3_733)); \ - __ret_733; \ +#define vqdmlsl_laneq_s16(__p0_629, __p1_629, __p2_629, __p3_629) __extension__ ({ \ + int32x4_t __ret_629; \ + int32x4_t __s0_629 = __p0_629; \ + int16x4_t __s1_629 = __p1_629; \ + int16x8_t __s2_629 = __p2_629; \ + __ret_629 = vqdmlsl_s16(__s0_629, __s1_629, splat_laneq_s16(__s2_629, __p3_629)); \ + __ret_629; \ }) #else -#define vqdmlsl_laneq_s16(__p0_734, __p1_734, __p2_734, __p3_734) __extension__ ({ \ - int32x4_t __ret_734; \ - int32x4_t __s0_734 = __p0_734; \ - int16x4_t __s1_734 = __p1_734; \ - int16x8_t __s2_734 = __p2_734; \ - int32x4_t __rev0_734; __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 3, 2, 1, 0); \ - int16x4_t __rev1_734; __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \ - int16x8_t __rev2_734; __rev2_734 = __builtin_shufflevector(__s2_734, __s2_734, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_734 = __noswap_vqdmlsl_s16(__rev0_734, __rev1_734, __noswap_splat_laneq_s16(__rev2_734, __p3_734)); \ - __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 3, 2, 1, 0); \ - __ret_734; \ +#define vqdmlsl_laneq_s16(__p0_630, __p1_630, __p2_630, __p3_630) __extension__ ({ \ + int32x4_t __ret_630; \ + int32x4_t __s0_630 = __p0_630; \ + int16x4_t __s1_630 = __p1_630; \ + int16x8_t __s2_630 = __p2_630; \ + int32x4_t __rev0_630; __rev0_630 = __builtin_shufflevector(__s0_630, __s0_630, 3, 2, 1, 0); \ + int16x4_t __rev1_630; __rev1_630 = __builtin_shufflevector(__s1_630, __s1_630, 3, 2, 1, 0); \ + int16x8_t __rev2_630; __rev2_630 = __builtin_shufflevector(__s2_630, __s2_630, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_630 = __noswap_vqdmlsl_s16(__rev0_630, __rev1_630, __noswap_splat_laneq_s16(__rev2_630, __p3_630)); \ + __ret_630 = __builtin_shufflevector(__ret_630, __ret_630, 3, 2, 1, 0); \ + __ret_630; \ }) #endif @@ -59389,78 +53461,78 @@ __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhs_lane_s32(__p0_735, __p1_735, __p2_735) __extension__ ({ \ - int32_t __ret_735; \ - int32_t __s0_735 = __p0_735; \ - int32x2_t __s1_735 = __p1_735; \ - __ret_735 = vqdmulhs_s32(__s0_735, vget_lane_s32(__s1_735, __p2_735)); \ - __ret_735; \ +#define vqdmulhs_lane_s32(__p0_631, __p1_631, __p2_631) __extension__ ({ \ + int32_t __ret_631; \ + int32_t __s0_631 = 
__p0_631; \ + int32x2_t __s1_631 = __p1_631; \ + __ret_631 = vqdmulhs_s32(__s0_631, vget_lane_s32(__s1_631, __p2_631)); \ + __ret_631; \ }) #else -#define vqdmulhs_lane_s32(__p0_736, __p1_736, __p2_736) __extension__ ({ \ - int32_t __ret_736; \ - int32_t __s0_736 = __p0_736; \ - int32x2_t __s1_736 = __p1_736; \ - int32x2_t __rev1_736; __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \ - __ret_736 = vqdmulhs_s32(__s0_736, __noswap_vget_lane_s32(__rev1_736, __p2_736)); \ - __ret_736; \ +#define vqdmulhs_lane_s32(__p0_632, __p1_632, __p2_632) __extension__ ({ \ + int32_t __ret_632; \ + int32_t __s0_632 = __p0_632; \ + int32x2_t __s1_632 = __p1_632; \ + int32x2_t __rev1_632; __rev1_632 = __builtin_shufflevector(__s1_632, __s1_632, 1, 0); \ + __ret_632 = vqdmulhs_s32(__s0_632, __noswap_vget_lane_s32(__rev1_632, __p2_632)); \ + __ret_632; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhh_lane_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \ - int16_t __ret_737; \ - int16_t __s0_737 = __p0_737; \ - int16x4_t __s1_737 = __p1_737; \ - __ret_737 = vqdmulhh_s16(__s0_737, vget_lane_s16(__s1_737, __p2_737)); \ - __ret_737; \ +#define vqdmulhh_lane_s16(__p0_633, __p1_633, __p2_633) __extension__ ({ \ + int16_t __ret_633; \ + int16_t __s0_633 = __p0_633; \ + int16x4_t __s1_633 = __p1_633; \ + __ret_633 = vqdmulhh_s16(__s0_633, vget_lane_s16(__s1_633, __p2_633)); \ + __ret_633; \ }) #else -#define vqdmulhh_lane_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \ - int16_t __ret_738; \ - int16_t __s0_738 = __p0_738; \ - int16x4_t __s1_738 = __p1_738; \ - int16x4_t __rev1_738; __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 3, 2, 1, 0); \ - __ret_738 = vqdmulhh_s16(__s0_738, __noswap_vget_lane_s16(__rev1_738, __p2_738)); \ - __ret_738; \ +#define vqdmulhh_lane_s16(__p0_634, __p1_634, __p2_634) __extension__ ({ \ + int16_t __ret_634; \ + int16_t __s0_634 = __p0_634; \ + int16x4_t __s1_634 = __p1_634; \ + int16x4_t __rev1_634; __rev1_634 = __builtin_shufflevector(__s1_634, __s1_634, 3, 2, 1, 0); \ + __ret_634 = vqdmulhh_s16(__s0_634, __noswap_vget_lane_s16(__rev1_634, __p2_634)); \ + __ret_634; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhs_laneq_s32(__p0_739, __p1_739, __p2_739) __extension__ ({ \ - int32_t __ret_739; \ - int32_t __s0_739 = __p0_739; \ - int32x4_t __s1_739 = __p1_739; \ - __ret_739 = vqdmulhs_s32(__s0_739, vgetq_lane_s32(__s1_739, __p2_739)); \ - __ret_739; \ +#define vqdmulhs_laneq_s32(__p0_635, __p1_635, __p2_635) __extension__ ({ \ + int32_t __ret_635; \ + int32_t __s0_635 = __p0_635; \ + int32x4_t __s1_635 = __p1_635; \ + __ret_635 = vqdmulhs_s32(__s0_635, vgetq_lane_s32(__s1_635, __p2_635)); \ + __ret_635; \ }) #else -#define vqdmulhs_laneq_s32(__p0_740, __p1_740, __p2_740) __extension__ ({ \ - int32_t __ret_740; \ - int32_t __s0_740 = __p0_740; \ - int32x4_t __s1_740 = __p1_740; \ - int32x4_t __rev1_740; __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 3, 2, 1, 0); \ - __ret_740 = vqdmulhs_s32(__s0_740, __noswap_vgetq_lane_s32(__rev1_740, __p2_740)); \ - __ret_740; \ +#define vqdmulhs_laneq_s32(__p0_636, __p1_636, __p2_636) __extension__ ({ \ + int32_t __ret_636; \ + int32_t __s0_636 = __p0_636; \ + int32x4_t __s1_636 = __p1_636; \ + int32x4_t __rev1_636; __rev1_636 = __builtin_shufflevector(__s1_636, __s1_636, 3, 2, 1, 0); \ + __ret_636 = vqdmulhs_s32(__s0_636, __noswap_vgetq_lane_s32(__rev1_636, __p2_636)); \ + __ret_636; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhh_laneq_s16(__p0_741, __p1_741, __p2_741) __extension__ ({ \ 
- int16_t __ret_741; \ - int16_t __s0_741 = __p0_741; \ - int16x8_t __s1_741 = __p1_741; \ - __ret_741 = vqdmulhh_s16(__s0_741, vgetq_lane_s16(__s1_741, __p2_741)); \ - __ret_741; \ +#define vqdmulhh_laneq_s16(__p0_637, __p1_637, __p2_637) __extension__ ({ \ + int16_t __ret_637; \ + int16_t __s0_637 = __p0_637; \ + int16x8_t __s1_637 = __p1_637; \ + __ret_637 = vqdmulhh_s16(__s0_637, vgetq_lane_s16(__s1_637, __p2_637)); \ + __ret_637; \ }) #else -#define vqdmulhh_laneq_s16(__p0_742, __p1_742, __p2_742) __extension__ ({ \ - int16_t __ret_742; \ - int16_t __s0_742 = __p0_742; \ - int16x8_t __s1_742 = __p1_742; \ - int16x8_t __rev1_742; __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_742 = vqdmulhh_s16(__s0_742, __noswap_vgetq_lane_s16(__rev1_742, __p2_742)); \ - __ret_742; \ +#define vqdmulhh_laneq_s16(__p0_638, __p1_638, __p2_638) __extension__ ({ \ + int16_t __ret_638; \ + int16_t __s0_638 = __p0_638; \ + int16x8_t __s1_638 = __p1_638; \ + int16x8_t __rev1_638; __rev1_638 = __builtin_shufflevector(__s1_638, __s1_638, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_638 = vqdmulhh_s16(__s0_638, __noswap_vgetq_lane_s16(__rev1_638, __p2_638)); \ + __ret_638; \ }) #endif @@ -59593,86 +53665,86 @@ __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_lane_s32(__p0_743, __p1_743, __p2_743) __extension__ ({ \ - int64x2_t __ret_743; \ - int32x4_t __s0_743 = __p0_743; \ - int32x2_t __s1_743 = __p1_743; \ - __ret_743 = vqdmull_s32(vget_high_s32(__s0_743), splat_lane_s32(__s1_743, __p2_743)); \ - __ret_743; \ +#define vqdmull_high_lane_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \ + int64x2_t __ret_639; \ + int32x4_t __s0_639 = __p0_639; \ + int32x2_t __s1_639 = __p1_639; \ + __ret_639 = vqdmull_s32(vget_high_s32(__s0_639), splat_lane_s32(__s1_639, __p2_639)); \ + __ret_639; \ }) #else -#define vqdmull_high_lane_s32(__p0_744, __p1_744, __p2_744) __extension__ ({ \ - int64x2_t __ret_744; \ - int32x4_t __s0_744 = __p0_744; \ - int32x2_t __s1_744 = __p1_744; \ - int32x4_t __rev0_744; __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 3, 2, 1, 0); \ - int32x2_t __rev1_744; __rev1_744 = __builtin_shufflevector(__s1_744, __s1_744, 1, 0); \ - __ret_744 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_744), __noswap_splat_lane_s32(__rev1_744, __p2_744)); \ - __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 1, 0); \ - __ret_744; \ +#define vqdmull_high_lane_s32(__p0_640, __p1_640, __p2_640) __extension__ ({ \ + int64x2_t __ret_640; \ + int32x4_t __s0_640 = __p0_640; \ + int32x2_t __s1_640 = __p1_640; \ + int32x4_t __rev0_640; __rev0_640 = __builtin_shufflevector(__s0_640, __s0_640, 3, 2, 1, 0); \ + int32x2_t __rev1_640; __rev1_640 = __builtin_shufflevector(__s1_640, __s1_640, 1, 0); \ + __ret_640 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_640), __noswap_splat_lane_s32(__rev1_640, __p2_640)); \ + __ret_640 = __builtin_shufflevector(__ret_640, __ret_640, 1, 0); \ + __ret_640; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_lane_s16(__p0_745, __p1_745, __p2_745) __extension__ ({ \ - int32x4_t __ret_745; \ - int16x8_t __s0_745 = __p0_745; \ - int16x4_t __s1_745 = __p1_745; \ - __ret_745 = vqdmull_s16(vget_high_s16(__s0_745), splat_lane_s16(__s1_745, __p2_745)); \ - __ret_745; \ +#define vqdmull_high_lane_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \ + int32x4_t __ret_641; \ + int16x8_t __s0_641 = __p0_641; \ + int16x4_t __s1_641 = __p1_641; \ + __ret_641 = 
vqdmull_s16(vget_high_s16(__s0_641), splat_lane_s16(__s1_641, __p2_641)); \ + __ret_641; \ }) #else -#define vqdmull_high_lane_s16(__p0_746, __p1_746, __p2_746) __extension__ ({ \ - int32x4_t __ret_746; \ - int16x8_t __s0_746 = __p0_746; \ - int16x4_t __s1_746 = __p1_746; \ - int16x8_t __rev0_746; __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_746; __rev1_746 = __builtin_shufflevector(__s1_746, __s1_746, 3, 2, 1, 0); \ - __ret_746 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_746), __noswap_splat_lane_s16(__rev1_746, __p2_746)); \ - __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \ - __ret_746; \ +#define vqdmull_high_lane_s16(__p0_642, __p1_642, __p2_642) __extension__ ({ \ + int32x4_t __ret_642; \ + int16x8_t __s0_642 = __p0_642; \ + int16x4_t __s1_642 = __p1_642; \ + int16x8_t __rev0_642; __rev0_642 = __builtin_shufflevector(__s0_642, __s0_642, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_642; __rev1_642 = __builtin_shufflevector(__s1_642, __s1_642, 3, 2, 1, 0); \ + __ret_642 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_642), __noswap_splat_lane_s16(__rev1_642, __p2_642)); \ + __ret_642 = __builtin_shufflevector(__ret_642, __ret_642, 3, 2, 1, 0); \ + __ret_642; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_laneq_s32(__p0_747, __p1_747, __p2_747) __extension__ ({ \ - int64x2_t __ret_747; \ - int32x4_t __s0_747 = __p0_747; \ - int32x4_t __s1_747 = __p1_747; \ - __ret_747 = vqdmull_s32(vget_high_s32(__s0_747), splat_laneq_s32(__s1_747, __p2_747)); \ - __ret_747; \ +#define vqdmull_high_laneq_s32(__p0_643, __p1_643, __p2_643) __extension__ ({ \ + int64x2_t __ret_643; \ + int32x4_t __s0_643 = __p0_643; \ + int32x4_t __s1_643 = __p1_643; \ + __ret_643 = vqdmull_s32(vget_high_s32(__s0_643), splat_laneq_s32(__s1_643, __p2_643)); \ + __ret_643; \ }) #else -#define vqdmull_high_laneq_s32(__p0_748, __p1_748, __p2_748) __extension__ ({ \ - int64x2_t __ret_748; \ - int32x4_t __s0_748 = __p0_748; \ - int32x4_t __s1_748 = __p1_748; \ - int32x4_t __rev0_748; __rev0_748 = __builtin_shufflevector(__s0_748, __s0_748, 3, 2, 1, 0); \ - int32x4_t __rev1_748; __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 3, 2, 1, 0); \ - __ret_748 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_748), __noswap_splat_laneq_s32(__rev1_748, __p2_748)); \ - __ret_748 = __builtin_shufflevector(__ret_748, __ret_748, 1, 0); \ - __ret_748; \ +#define vqdmull_high_laneq_s32(__p0_644, __p1_644, __p2_644) __extension__ ({ \ + int64x2_t __ret_644; \ + int32x4_t __s0_644 = __p0_644; \ + int32x4_t __s1_644 = __p1_644; \ + int32x4_t __rev0_644; __rev0_644 = __builtin_shufflevector(__s0_644, __s0_644, 3, 2, 1, 0); \ + int32x4_t __rev1_644; __rev1_644 = __builtin_shufflevector(__s1_644, __s1_644, 3, 2, 1, 0); \ + __ret_644 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_644), __noswap_splat_laneq_s32(__rev1_644, __p2_644)); \ + __ret_644 = __builtin_shufflevector(__ret_644, __ret_644, 1, 0); \ + __ret_644; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_laneq_s16(__p0_749, __p1_749, __p2_749) __extension__ ({ \ - int32x4_t __ret_749; \ - int16x8_t __s0_749 = __p0_749; \ - int16x8_t __s1_749 = __p1_749; \ - __ret_749 = vqdmull_s16(vget_high_s16(__s0_749), splat_laneq_s16(__s1_749, __p2_749)); \ - __ret_749; \ +#define vqdmull_high_laneq_s16(__p0_645, __p1_645, __p2_645) __extension__ ({ \ + int32x4_t __ret_645; \ + int16x8_t __s0_645 = __p0_645; \ + int16x8_t __s1_645 = __p1_645; \ + __ret_645 = 
vqdmull_s16(vget_high_s16(__s0_645), splat_laneq_s16(__s1_645, __p2_645)); \ + __ret_645; \ }) #else -#define vqdmull_high_laneq_s16(__p0_750, __p1_750, __p2_750) __extension__ ({ \ - int32x4_t __ret_750; \ - int16x8_t __s0_750 = __p0_750; \ - int16x8_t __s1_750 = __p1_750; \ - int16x8_t __rev0_750; __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_750; __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_750 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_750), __noswap_splat_laneq_s16(__rev1_750, __p2_750)); \ - __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 3, 2, 1, 0); \ - __ret_750; \ +#define vqdmull_high_laneq_s16(__p0_646, __p1_646, __p2_646) __extension__ ({ \ + int32x4_t __ret_646; \ + int16x8_t __s0_646 = __p0_646; \ + int16x8_t __s1_646 = __p1_646; \ + int16x8_t __rev0_646; __rev0_646 = __builtin_shufflevector(__s0_646, __s0_646, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_646; __rev1_646 = __builtin_shufflevector(__s1_646, __s1_646, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_646 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_646), __noswap_splat_laneq_s16(__rev1_646, __p2_646)); \ + __ret_646 = __builtin_shufflevector(__ret_646, __ret_646, 3, 2, 1, 0); \ + __ret_646; \ }) #endif @@ -59709,120 +53781,120 @@ __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulls_lane_s32(__p0_751, __p1_751, __p2_751) __extension__ ({ \ - int64_t __ret_751; \ - int32_t __s0_751 = __p0_751; \ - int32x2_t __s1_751 = __p1_751; \ - __ret_751 = vqdmulls_s32(__s0_751, vget_lane_s32(__s1_751, __p2_751)); \ - __ret_751; \ +#define vqdmulls_lane_s32(__p0_647, __p1_647, __p2_647) __extension__ ({ \ + int64_t __ret_647; \ + int32_t __s0_647 = __p0_647; \ + int32x2_t __s1_647 = __p1_647; \ + __ret_647 = vqdmulls_s32(__s0_647, vget_lane_s32(__s1_647, __p2_647)); \ + __ret_647; \ }) #else -#define vqdmulls_lane_s32(__p0_752, __p1_752, __p2_752) __extension__ ({ \ - int64_t __ret_752; \ - int32_t __s0_752 = __p0_752; \ - int32x2_t __s1_752 = __p1_752; \ - int32x2_t __rev1_752; __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 1, 0); \ - __ret_752 = vqdmulls_s32(__s0_752, __noswap_vget_lane_s32(__rev1_752, __p2_752)); \ - __ret_752; \ +#define vqdmulls_lane_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \ + int64_t __ret_648; \ + int32_t __s0_648 = __p0_648; \ + int32x2_t __s1_648 = __p1_648; \ + int32x2_t __rev1_648; __rev1_648 = __builtin_shufflevector(__s1_648, __s1_648, 1, 0); \ + __ret_648 = vqdmulls_s32(__s0_648, __noswap_vget_lane_s32(__rev1_648, __p2_648)); \ + __ret_648; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmullh_lane_s16(__p0_753, __p1_753, __p2_753) __extension__ ({ \ - int32_t __ret_753; \ - int16_t __s0_753 = __p0_753; \ - int16x4_t __s1_753 = __p1_753; \ - __ret_753 = vqdmullh_s16(__s0_753, vget_lane_s16(__s1_753, __p2_753)); \ - __ret_753; \ +#define vqdmullh_lane_s16(__p0_649, __p1_649, __p2_649) __extension__ ({ \ + int32_t __ret_649; \ + int16_t __s0_649 = __p0_649; \ + int16x4_t __s1_649 = __p1_649; \ + __ret_649 = vqdmullh_s16(__s0_649, vget_lane_s16(__s1_649, __p2_649)); \ + __ret_649; \ }) #else -#define vqdmullh_lane_s16(__p0_754, __p1_754, __p2_754) __extension__ ({ \ - int32_t __ret_754; \ - int16_t __s0_754 = __p0_754; \ - int16x4_t __s1_754 = __p1_754; \ - int16x4_t __rev1_754; __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 3, 2, 1, 0); \ - __ret_754 = vqdmullh_s16(__s0_754, 
__noswap_vget_lane_s16(__rev1_754, __p2_754)); \ - __ret_754; \ +#define vqdmullh_lane_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \ + int32_t __ret_650; \ + int16_t __s0_650 = __p0_650; \ + int16x4_t __s1_650 = __p1_650; \ + int16x4_t __rev1_650; __rev1_650 = __builtin_shufflevector(__s1_650, __s1_650, 3, 2, 1, 0); \ + __ret_650 = vqdmullh_s16(__s0_650, __noswap_vget_lane_s16(__rev1_650, __p2_650)); \ + __ret_650; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulls_laneq_s32(__p0_755, __p1_755, __p2_755) __extension__ ({ \ - int64_t __ret_755; \ - int32_t __s0_755 = __p0_755; \ - int32x4_t __s1_755 = __p1_755; \ - __ret_755 = vqdmulls_s32(__s0_755, vgetq_lane_s32(__s1_755, __p2_755)); \ - __ret_755; \ +#define vqdmulls_laneq_s32(__p0_651, __p1_651, __p2_651) __extension__ ({ \ + int64_t __ret_651; \ + int32_t __s0_651 = __p0_651; \ + int32x4_t __s1_651 = __p1_651; \ + __ret_651 = vqdmulls_s32(__s0_651, vgetq_lane_s32(__s1_651, __p2_651)); \ + __ret_651; \ }) #else -#define vqdmulls_laneq_s32(__p0_756, __p1_756, __p2_756) __extension__ ({ \ - int64_t __ret_756; \ - int32_t __s0_756 = __p0_756; \ - int32x4_t __s1_756 = __p1_756; \ - int32x4_t __rev1_756; __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \ - __ret_756 = vqdmulls_s32(__s0_756, __noswap_vgetq_lane_s32(__rev1_756, __p2_756)); \ - __ret_756; \ +#define vqdmulls_laneq_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \ + int64_t __ret_652; \ + int32_t __s0_652 = __p0_652; \ + int32x4_t __s1_652 = __p1_652; \ + int32x4_t __rev1_652; __rev1_652 = __builtin_shufflevector(__s1_652, __s1_652, 3, 2, 1, 0); \ + __ret_652 = vqdmulls_s32(__s0_652, __noswap_vgetq_lane_s32(__rev1_652, __p2_652)); \ + __ret_652; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmullh_laneq_s16(__p0_757, __p1_757, __p2_757) __extension__ ({ \ - int32_t __ret_757; \ - int16_t __s0_757 = __p0_757; \ - int16x8_t __s1_757 = __p1_757; \ - __ret_757 = vqdmullh_s16(__s0_757, vgetq_lane_s16(__s1_757, __p2_757)); \ - __ret_757; \ +#define vqdmullh_laneq_s16(__p0_653, __p1_653, __p2_653) __extension__ ({ \ + int32_t __ret_653; \ + int16_t __s0_653 = __p0_653; \ + int16x8_t __s1_653 = __p1_653; \ + __ret_653 = vqdmullh_s16(__s0_653, vgetq_lane_s16(__s1_653, __p2_653)); \ + __ret_653; \ }) #else -#define vqdmullh_laneq_s16(__p0_758, __p1_758, __p2_758) __extension__ ({ \ - int32_t __ret_758; \ - int16_t __s0_758 = __p0_758; \ - int16x8_t __s1_758 = __p1_758; \ - int16x8_t __rev1_758; __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_758 = vqdmullh_s16(__s0_758, __noswap_vgetq_lane_s16(__rev1_758, __p2_758)); \ - __ret_758; \ +#define vqdmullh_laneq_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \ + int32_t __ret_654; \ + int16_t __s0_654 = __p0_654; \ + int16x8_t __s1_654 = __p1_654; \ + int16x8_t __rev1_654; __rev1_654 = __builtin_shufflevector(__s1_654, __s1_654, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_654 = vqdmullh_s16(__s0_654, __noswap_vgetq_lane_s16(__rev1_654, __p2_654)); \ + __ret_654; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_laneq_s32(__p0_759, __p1_759, __p2_759) __extension__ ({ \ - int64x2_t __ret_759; \ - int32x2_t __s0_759 = __p0_759; \ - int32x4_t __s1_759 = __p1_759; \ - __ret_759 = vqdmull_s32(__s0_759, splat_laneq_s32(__s1_759, __p2_759)); \ - __ret_759; \ +#define vqdmull_laneq_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \ + int64x2_t __ret_655; \ + int32x2_t __s0_655 = __p0_655; \ + int32x4_t __s1_655 = __p1_655; \ + __ret_655 = vqdmull_s32(__s0_655, 
splat_laneq_s32(__s1_655, __p2_655)); \ + __ret_655; \ }) #else -#define vqdmull_laneq_s32(__p0_760, __p1_760, __p2_760) __extension__ ({ \ - int64x2_t __ret_760; \ - int32x2_t __s0_760 = __p0_760; \ - int32x4_t __s1_760 = __p1_760; \ - int32x2_t __rev0_760; __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 1, 0); \ - int32x4_t __rev1_760; __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \ - __ret_760 = __noswap_vqdmull_s32(__rev0_760, __noswap_splat_laneq_s32(__rev1_760, __p2_760)); \ - __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 1, 0); \ - __ret_760; \ +#define vqdmull_laneq_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \ + int64x2_t __ret_656; \ + int32x2_t __s0_656 = __p0_656; \ + int32x4_t __s1_656 = __p1_656; \ + int32x2_t __rev0_656; __rev0_656 = __builtin_shufflevector(__s0_656, __s0_656, 1, 0); \ + int32x4_t __rev1_656; __rev1_656 = __builtin_shufflevector(__s1_656, __s1_656, 3, 2, 1, 0); \ + __ret_656 = __noswap_vqdmull_s32(__rev0_656, __noswap_splat_laneq_s32(__rev1_656, __p2_656)); \ + __ret_656 = __builtin_shufflevector(__ret_656, __ret_656, 1, 0); \ + __ret_656; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_laneq_s16(__p0_761, __p1_761, __p2_761) __extension__ ({ \ - int32x4_t __ret_761; \ - int16x4_t __s0_761 = __p0_761; \ - int16x8_t __s1_761 = __p1_761; \ - __ret_761 = vqdmull_s16(__s0_761, splat_laneq_s16(__s1_761, __p2_761)); \ - __ret_761; \ +#define vqdmull_laneq_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \ + int32x4_t __ret_657; \ + int16x4_t __s0_657 = __p0_657; \ + int16x8_t __s1_657 = __p1_657; \ + __ret_657 = vqdmull_s16(__s0_657, splat_laneq_s16(__s1_657, __p2_657)); \ + __ret_657; \ }) #else -#define vqdmull_laneq_s16(__p0_762, __p1_762, __p2_762) __extension__ ({ \ - int32x4_t __ret_762; \ - int16x4_t __s0_762 = __p0_762; \ - int16x8_t __s1_762 = __p1_762; \ - int16x4_t __rev0_762; __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 3, 2, 1, 0); \ - int16x8_t __rev1_762; __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_762 = __noswap_vqdmull_s16(__rev0_762, __noswap_splat_laneq_s16(__rev1_762, __p2_762)); \ - __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 3, 2, 1, 0); \ - __ret_762; \ +#define vqdmull_laneq_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \ + int32x4_t __ret_658; \ + int16x4_t __s0_658 = __p0_658; \ + int16x8_t __s1_658 = __p1_658; \ + int16x4_t __rev0_658; __rev0_658 = __builtin_shufflevector(__s0_658, __s0_658, 3, 2, 1, 0); \ + int16x8_t __rev1_658; __rev1_658 = __builtin_shufflevector(__s1_658, __s1_658, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_658 = __noswap_vqdmull_s16(__rev0_658, __noswap_splat_laneq_s16(__rev1_658, __p2_658)); \ + __ret_658 = __builtin_shufflevector(__ret_658, __ret_658, 3, 2, 1, 0); \ + __ret_658; \ }) #endif @@ -60160,78 +54232,78 @@ __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhs_lane_s32(__p0_763, __p1_763, __p2_763) __extension__ ({ \ - int32_t __ret_763; \ - int32_t __s0_763 = __p0_763; \ - int32x2_t __s1_763 = __p1_763; \ - __ret_763 = vqrdmulhs_s32(__s0_763, vget_lane_s32(__s1_763, __p2_763)); \ - __ret_763; \ +#define vqrdmulhs_lane_s32(__p0_659, __p1_659, __p2_659) __extension__ ({ \ + int32_t __ret_659; \ + int32_t __s0_659 = __p0_659; \ + int32x2_t __s1_659 = __p1_659; \ + __ret_659 = vqrdmulhs_s32(__s0_659, vget_lane_s32(__s1_659, __p2_659)); \ + __ret_659; \ }) #else -#define vqrdmulhs_lane_s32(__p0_764, __p1_764, __p2_764) 
__extension__ ({ \ - int32_t __ret_764; \ - int32_t __s0_764 = __p0_764; \ - int32x2_t __s1_764 = __p1_764; \ - int32x2_t __rev1_764; __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 1, 0); \ - __ret_764 = vqrdmulhs_s32(__s0_764, __noswap_vget_lane_s32(__rev1_764, __p2_764)); \ - __ret_764; \ +#define vqrdmulhs_lane_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \ + int32_t __ret_660; \ + int32_t __s0_660 = __p0_660; \ + int32x2_t __s1_660 = __p1_660; \ + int32x2_t __rev1_660; __rev1_660 = __builtin_shufflevector(__s1_660, __s1_660, 1, 0); \ + __ret_660 = vqrdmulhs_s32(__s0_660, __noswap_vget_lane_s32(__rev1_660, __p2_660)); \ + __ret_660; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhh_lane_s16(__p0_765, __p1_765, __p2_765) __extension__ ({ \ - int16_t __ret_765; \ - int16_t __s0_765 = __p0_765; \ - int16x4_t __s1_765 = __p1_765; \ - __ret_765 = vqrdmulhh_s16(__s0_765, vget_lane_s16(__s1_765, __p2_765)); \ - __ret_765; \ +#define vqrdmulhh_lane_s16(__p0_661, __p1_661, __p2_661) __extension__ ({ \ + int16_t __ret_661; \ + int16_t __s0_661 = __p0_661; \ + int16x4_t __s1_661 = __p1_661; \ + __ret_661 = vqrdmulhh_s16(__s0_661, vget_lane_s16(__s1_661, __p2_661)); \ + __ret_661; \ }) #else -#define vqrdmulhh_lane_s16(__p0_766, __p1_766, __p2_766) __extension__ ({ \ - int16_t __ret_766; \ - int16_t __s0_766 = __p0_766; \ - int16x4_t __s1_766 = __p1_766; \ - int16x4_t __rev1_766; __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \ - __ret_766 = vqrdmulhh_s16(__s0_766, __noswap_vget_lane_s16(__rev1_766, __p2_766)); \ - __ret_766; \ +#define vqrdmulhh_lane_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \ + int16_t __ret_662; \ + int16_t __s0_662 = __p0_662; \ + int16x4_t __s1_662 = __p1_662; \ + int16x4_t __rev1_662; __rev1_662 = __builtin_shufflevector(__s1_662, __s1_662, 3, 2, 1, 0); \ + __ret_662 = vqrdmulhh_s16(__s0_662, __noswap_vget_lane_s16(__rev1_662, __p2_662)); \ + __ret_662; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhs_laneq_s32(__p0_767, __p1_767, __p2_767) __extension__ ({ \ - int32_t __ret_767; \ - int32_t __s0_767 = __p0_767; \ - int32x4_t __s1_767 = __p1_767; \ - __ret_767 = vqrdmulhs_s32(__s0_767, vgetq_lane_s32(__s1_767, __p2_767)); \ - __ret_767; \ +#define vqrdmulhs_laneq_s32(__p0_663, __p1_663, __p2_663) __extension__ ({ \ + int32_t __ret_663; \ + int32_t __s0_663 = __p0_663; \ + int32x4_t __s1_663 = __p1_663; \ + __ret_663 = vqrdmulhs_s32(__s0_663, vgetq_lane_s32(__s1_663, __p2_663)); \ + __ret_663; \ }) #else -#define vqrdmulhs_laneq_s32(__p0_768, __p1_768, __p2_768) __extension__ ({ \ - int32_t __ret_768; \ - int32_t __s0_768 = __p0_768; \ - int32x4_t __s1_768 = __p1_768; \ - int32x4_t __rev1_768; __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 3, 2, 1, 0); \ - __ret_768 = vqrdmulhs_s32(__s0_768, __noswap_vgetq_lane_s32(__rev1_768, __p2_768)); \ - __ret_768; \ +#define vqrdmulhs_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ + int32_t __ret_664; \ + int32_t __s0_664 = __p0_664; \ + int32x4_t __s1_664 = __p1_664; \ + int32x4_t __rev1_664; __rev1_664 = __builtin_shufflevector(__s1_664, __s1_664, 3, 2, 1, 0); \ + __ret_664 = vqrdmulhs_s32(__s0_664, __noswap_vgetq_lane_s32(__rev1_664, __p2_664)); \ + __ret_664; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhh_laneq_s16(__p0_769, __p1_769, __p2_769) __extension__ ({ \ - int16_t __ret_769; \ - int16_t __s0_769 = __p0_769; \ - int16x8_t __s1_769 = __p1_769; \ - __ret_769 = vqrdmulhh_s16(__s0_769, vgetq_lane_s16(__s1_769, __p2_769)); \ - __ret_769; \ 
+#define vqrdmulhh_laneq_s16(__p0_665, __p1_665, __p2_665) __extension__ ({ \ + int16_t __ret_665; \ + int16_t __s0_665 = __p0_665; \ + int16x8_t __s1_665 = __p1_665; \ + __ret_665 = vqrdmulhh_s16(__s0_665, vgetq_lane_s16(__s1_665, __p2_665)); \ + __ret_665; \ }) #else -#define vqrdmulhh_laneq_s16(__p0_770, __p1_770, __p2_770) __extension__ ({ \ - int16_t __ret_770; \ - int16_t __s0_770 = __p0_770; \ - int16x8_t __s1_770 = __p1_770; \ - int16x8_t __rev1_770; __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_770 = vqrdmulhh_s16(__s0_770, __noswap_vgetq_lane_s16(__rev1_770, __p2_770)); \ - __ret_770; \ +#define vqrdmulhh_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ + int16_t __ret_666; \ + int16_t __s0_666 = __p0_666; \ + int16x8_t __s1_666 = __p1_666; \ + int16x8_t __rev1_666; __rev1_666 = __builtin_shufflevector(__s1_666, __s1_666, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_666 = vqrdmulhh_s16(__s0_666, __noswap_vgetq_lane_s16(__rev1_666, __p2_666)); \ + __ret_666; \ }) #endif @@ -60360,128 +54432,128 @@ __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { return __ret; } #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u32(__p0_771, __p1_771, __p2_771) __extension__ ({ \ - uint16x8_t __ret_771; \ - uint16x4_t __s0_771 = __p0_771; \ - uint32x4_t __s1_771 = __p1_771; \ - __ret_771 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_771), (uint16x4_t)(vqrshrn_n_u32(__s1_771, __p2_771)))); \ - __ret_771; \ +#define vqrshrn_high_n_u32(__p0_667, __p1_667, __p2_667) __extension__ ({ \ + uint16x8_t __ret_667; \ + uint16x4_t __s0_667 = __p0_667; \ + uint32x4_t __s1_667 = __p1_667; \ + __ret_667 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_667), (uint16x4_t)(vqrshrn_n_u32(__s1_667, __p2_667)))); \ + __ret_667; \ }) #else -#define vqrshrn_high_n_u32(__p0_772, __p1_772, __p2_772) __extension__ ({ \ - uint16x8_t __ret_772; \ - uint16x4_t __s0_772 = __p0_772; \ - uint32x4_t __s1_772 = __p1_772; \ - uint16x4_t __rev0_772; __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 3, 2, 1, 0); \ - uint32x4_t __rev1_772; __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 3, 2, 1, 0); \ - __ret_772 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_772), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_772, __p2_772)))); \ - __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_772; \ +#define vqrshrn_high_n_u32(__p0_668, __p1_668, __p2_668) __extension__ ({ \ + uint16x8_t __ret_668; \ + uint16x4_t __s0_668 = __p0_668; \ + uint32x4_t __s1_668 = __p1_668; \ + uint16x4_t __rev0_668; __rev0_668 = __builtin_shufflevector(__s0_668, __s0_668, 3, 2, 1, 0); \ + uint32x4_t __rev1_668; __rev1_668 = __builtin_shufflevector(__s1_668, __s1_668, 3, 2, 1, 0); \ + __ret_668 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_668), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_668, __p2_668)))); \ + __ret_668 = __builtin_shufflevector(__ret_668, __ret_668, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_668; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u64(__p0_773, __p1_773, __p2_773) __extension__ ({ \ - uint32x4_t __ret_773; \ - uint32x2_t __s0_773 = __p0_773; \ - uint64x2_t __s1_773 = __p1_773; \ - __ret_773 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_773), (uint32x2_t)(vqrshrn_n_u64(__s1_773, __p2_773)))); \ - __ret_773; \ +#define vqrshrn_high_n_u64(__p0_669, __p1_669, __p2_669) __extension__ ({ \ + uint32x4_t __ret_669; \ + uint32x2_t __s0_669 = __p0_669; \ + uint64x2_t __s1_669 = __p1_669; \ + __ret_669 = 
(uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_669), (uint32x2_t)(vqrshrn_n_u64(__s1_669, __p2_669)))); \ + __ret_669; \ }) #else -#define vqrshrn_high_n_u64(__p0_774, __p1_774, __p2_774) __extension__ ({ \ - uint32x4_t __ret_774; \ - uint32x2_t __s0_774 = __p0_774; \ - uint64x2_t __s1_774 = __p1_774; \ - uint32x2_t __rev0_774; __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 1, 0); \ - uint64x2_t __rev1_774; __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 1, 0); \ - __ret_774 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_774), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_774, __p2_774)))); \ - __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \ - __ret_774; \ +#define vqrshrn_high_n_u64(__p0_670, __p1_670, __p2_670) __extension__ ({ \ + uint32x4_t __ret_670; \ + uint32x2_t __s0_670 = __p0_670; \ + uint64x2_t __s1_670 = __p1_670; \ + uint32x2_t __rev0_670; __rev0_670 = __builtin_shufflevector(__s0_670, __s0_670, 1, 0); \ + uint64x2_t __rev1_670; __rev1_670 = __builtin_shufflevector(__s1_670, __s1_670, 1, 0); \ + __ret_670 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_670), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_670, __p2_670)))); \ + __ret_670 = __builtin_shufflevector(__ret_670, __ret_670, 3, 2, 1, 0); \ + __ret_670; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u16(__p0_775, __p1_775, __p2_775) __extension__ ({ \ - uint8x16_t __ret_775; \ - uint8x8_t __s0_775 = __p0_775; \ - uint16x8_t __s1_775 = __p1_775; \ - __ret_775 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_775), (uint8x8_t)(vqrshrn_n_u16(__s1_775, __p2_775)))); \ - __ret_775; \ +#define vqrshrn_high_n_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ + uint8x16_t __ret_671; \ + uint8x8_t __s0_671 = __p0_671; \ + uint16x8_t __s1_671 = __p1_671; \ + __ret_671 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_671), (uint8x8_t)(vqrshrn_n_u16(__s1_671, __p2_671)))); \ + __ret_671; \ }) #else -#define vqrshrn_high_n_u16(__p0_776, __p1_776, __p2_776) __extension__ ({ \ - uint8x16_t __ret_776; \ - uint8x8_t __s0_776 = __p0_776; \ - uint16x8_t __s1_776 = __p1_776; \ - uint8x8_t __rev0_776; __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_776; __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_776 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_776), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_776, __p2_776)))); \ - __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_776; \ +#define vqrshrn_high_n_u16(__p0_672, __p1_672, __p2_672) __extension__ ({ \ + uint8x16_t __ret_672; \ + uint8x8_t __s0_672 = __p0_672; \ + uint16x8_t __s1_672 = __p1_672; \ + uint8x8_t __rev0_672; __rev0_672 = __builtin_shufflevector(__s0_672, __s0_672, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_672; __rev1_672 = __builtin_shufflevector(__s1_672, __s1_672, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_672 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_672), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_672, __p2_672)))); \ + __ret_672 = __builtin_shufflevector(__ret_672, __ret_672, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_672; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s32(__p0_777, __p1_777, __p2_777) __extension__ ({ \ - int16x8_t __ret_777; \ - int16x4_t __s0_777 = __p0_777; \ - int32x4_t __s1_777 = __p1_777; \ - __ret_777 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_777), 
(int16x4_t)(vqrshrn_n_s32(__s1_777, __p2_777)))); \ - __ret_777; \ +#define vqrshrn_high_n_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \ + int16x8_t __ret_673; \ + int16x4_t __s0_673 = __p0_673; \ + int32x4_t __s1_673 = __p1_673; \ + __ret_673 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_673), (int16x4_t)(vqrshrn_n_s32(__s1_673, __p2_673)))); \ + __ret_673; \ }) #else -#define vqrshrn_high_n_s32(__p0_778, __p1_778, __p2_778) __extension__ ({ \ - int16x8_t __ret_778; \ - int16x4_t __s0_778 = __p0_778; \ - int32x4_t __s1_778 = __p1_778; \ - int16x4_t __rev0_778; __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \ - int32x4_t __rev1_778; __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 3, 2, 1, 0); \ - __ret_778 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_778), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_778, __p2_778)))); \ - __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_778; \ +#define vqrshrn_high_n_s32(__p0_674, __p1_674, __p2_674) __extension__ ({ \ + int16x8_t __ret_674; \ + int16x4_t __s0_674 = __p0_674; \ + int32x4_t __s1_674 = __p1_674; \ + int16x4_t __rev0_674; __rev0_674 = __builtin_shufflevector(__s0_674, __s0_674, 3, 2, 1, 0); \ + int32x4_t __rev1_674; __rev1_674 = __builtin_shufflevector(__s1_674, __s1_674, 3, 2, 1, 0); \ + __ret_674 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_674), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_674, __p2_674)))); \ + __ret_674 = __builtin_shufflevector(__ret_674, __ret_674, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_674; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s64(__p0_779, __p1_779, __p2_779) __extension__ ({ \ - int32x4_t __ret_779; \ - int32x2_t __s0_779 = __p0_779; \ - int64x2_t __s1_779 = __p1_779; \ - __ret_779 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_779), (int32x2_t)(vqrshrn_n_s64(__s1_779, __p2_779)))); \ - __ret_779; \ +#define vqrshrn_high_n_s64(__p0_675, __p1_675, __p2_675) __extension__ ({ \ + int32x4_t __ret_675; \ + int32x2_t __s0_675 = __p0_675; \ + int64x2_t __s1_675 = __p1_675; \ + __ret_675 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_675), (int32x2_t)(vqrshrn_n_s64(__s1_675, __p2_675)))); \ + __ret_675; \ }) #else -#define vqrshrn_high_n_s64(__p0_780, __p1_780, __p2_780) __extension__ ({ \ - int32x4_t __ret_780; \ - int32x2_t __s0_780 = __p0_780; \ - int64x2_t __s1_780 = __p1_780; \ - int32x2_t __rev0_780; __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 1, 0); \ - int64x2_t __rev1_780; __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 1, 0); \ - __ret_780 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_780), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_780, __p2_780)))); \ - __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 3, 2, 1, 0); \ - __ret_780; \ +#define vqrshrn_high_n_s64(__p0_676, __p1_676, __p2_676) __extension__ ({ \ + int32x4_t __ret_676; \ + int32x2_t __s0_676 = __p0_676; \ + int64x2_t __s1_676 = __p1_676; \ + int32x2_t __rev0_676; __rev0_676 = __builtin_shufflevector(__s0_676, __s0_676, 1, 0); \ + int64x2_t __rev1_676; __rev1_676 = __builtin_shufflevector(__s1_676, __s1_676, 1, 0); \ + __ret_676 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_676), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_676, __p2_676)))); \ + __ret_676 = __builtin_shufflevector(__ret_676, __ret_676, 3, 2, 1, 0); \ + __ret_676; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s16(__p0_781, __p1_781, __p2_781) __extension__ ({ \ - int8x16_t __ret_781; \ - int8x8_t __s0_781 = __p0_781; \ 
- int16x8_t __s1_781 = __p1_781; \ - __ret_781 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_781), (int8x8_t)(vqrshrn_n_s16(__s1_781, __p2_781)))); \ - __ret_781; \ +#define vqrshrn_high_n_s16(__p0_677, __p1_677, __p2_677) __extension__ ({ \ + int8x16_t __ret_677; \ + int8x8_t __s0_677 = __p0_677; \ + int16x8_t __s1_677 = __p1_677; \ + __ret_677 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_677), (int8x8_t)(vqrshrn_n_s16(__s1_677, __p2_677)))); \ + __ret_677; \ }) #else -#define vqrshrn_high_n_s16(__p0_782, __p1_782, __p2_782) __extension__ ({ \ - int8x16_t __ret_782; \ - int8x8_t __s0_782 = __p0_782; \ - int16x8_t __s1_782 = __p1_782; \ - int8x8_t __rev0_782; __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_782; __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_782 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_782), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_782, __p2_782)))); \ - __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_782; \ +#define vqrshrn_high_n_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \ + int8x16_t __ret_678; \ + int8x8_t __s0_678 = __p0_678; \ + int16x8_t __s1_678 = __p1_678; \ + int8x8_t __rev0_678; __rev0_678 = __builtin_shufflevector(__s0_678, __s0_678, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_678; __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_678 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_678), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_678, __p2_678)))); \ + __ret_678 = __builtin_shufflevector(__ret_678, __ret_678, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_678; \ }) #endif @@ -60522,65 +54594,65 @@ __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s32(__p0_783, __p1_783, __p2_783) __extension__ ({ \ - int16x8_t __ret_783; \ - int16x4_t __s0_783 = __p0_783; \ - int32x4_t __s1_783 = __p1_783; \ - __ret_783 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_783), (int16x4_t)(vqrshrun_n_s32(__s1_783, __p2_783)))); \ - __ret_783; \ +#define vqrshrun_high_n_s32(__p0_679, __p1_679, __p2_679) __extension__ ({ \ + int16x8_t __ret_679; \ + int16x4_t __s0_679 = __p0_679; \ + int32x4_t __s1_679 = __p1_679; \ + __ret_679 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_679), (int16x4_t)(vqrshrun_n_s32(__s1_679, __p2_679)))); \ + __ret_679; \ }) #else -#define vqrshrun_high_n_s32(__p0_784, __p1_784, __p2_784) __extension__ ({ \ - int16x8_t __ret_784; \ - int16x4_t __s0_784 = __p0_784; \ - int32x4_t __s1_784 = __p1_784; \ - int16x4_t __rev0_784; __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 3, 2, 1, 0); \ - int32x4_t __rev1_784; __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 3, 2, 1, 0); \ - __ret_784 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_784), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_784, __p2_784)))); \ - __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_784; \ +#define vqrshrun_high_n_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \ + int16x8_t __ret_680; \ + int16x4_t __s0_680 = __p0_680; \ + int32x4_t __s1_680 = __p1_680; \ + int16x4_t __rev0_680; __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 3, 2, 1, 0); \ + int32x4_t __rev1_680; __rev1_680 = __builtin_shufflevector(__s1_680, __s1_680, 3, 2, 1, 0); \ + __ret_680 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_680), 
(int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_680, __p2_680)))); \ + __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_680; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s64(__p0_785, __p1_785, __p2_785) __extension__ ({ \ - int32x4_t __ret_785; \ - int32x2_t __s0_785 = __p0_785; \ - int64x2_t __s1_785 = __p1_785; \ - __ret_785 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_785), (int32x2_t)(vqrshrun_n_s64(__s1_785, __p2_785)))); \ - __ret_785; \ +#define vqrshrun_high_n_s64(__p0_681, __p1_681, __p2_681) __extension__ ({ \ + int32x4_t __ret_681; \ + int32x2_t __s0_681 = __p0_681; \ + int64x2_t __s1_681 = __p1_681; \ + __ret_681 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_681), (int32x2_t)(vqrshrun_n_s64(__s1_681, __p2_681)))); \ + __ret_681; \ }) #else -#define vqrshrun_high_n_s64(__p0_786, __p1_786, __p2_786) __extension__ ({ \ - int32x4_t __ret_786; \ - int32x2_t __s0_786 = __p0_786; \ - int64x2_t __s1_786 = __p1_786; \ - int32x2_t __rev0_786; __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 1, 0); \ - int64x2_t __rev1_786; __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 1, 0); \ - __ret_786 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_786), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_786, __p2_786)))); \ - __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 3, 2, 1, 0); \ - __ret_786; \ +#define vqrshrun_high_n_s64(__p0_682, __p1_682, __p2_682) __extension__ ({ \ + int32x4_t __ret_682; \ + int32x2_t __s0_682 = __p0_682; \ + int64x2_t __s1_682 = __p1_682; \ + int32x2_t __rev0_682; __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 1, 0); \ + int64x2_t __rev1_682; __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \ + __ret_682 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_682), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_682, __p2_682)))); \ + __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \ + __ret_682; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s16(__p0_787, __p1_787, __p2_787) __extension__ ({ \ - int8x16_t __ret_787; \ - int8x8_t __s0_787 = __p0_787; \ - int16x8_t __s1_787 = __p1_787; \ - __ret_787 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_787), (int8x8_t)(vqrshrun_n_s16(__s1_787, __p2_787)))); \ - __ret_787; \ +#define vqrshrun_high_n_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \ + int8x16_t __ret_683; \ + int8x8_t __s0_683 = __p0_683; \ + int16x8_t __s1_683 = __p1_683; \ + __ret_683 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_683), (int8x8_t)(vqrshrun_n_s16(__s1_683, __p2_683)))); \ + __ret_683; \ }) #else -#define vqrshrun_high_n_s16(__p0_788, __p1_788, __p2_788) __extension__ ({ \ - int8x16_t __ret_788; \ - int8x8_t __s0_788 = __p0_788; \ - int16x8_t __s1_788 = __p1_788; \ - int8x8_t __rev0_788; __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_788; __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_788 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_788), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_788, __p2_788)))); \ - __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_788; \ +#define vqrshrun_high_n_s16(__p0_684, __p1_684, __p2_684) __extension__ ({ \ + int8x16_t __ret_684; \ + int8x8_t __s0_684 = __p0_684; \ + int16x8_t __s1_684 = __p1_684; \ + int8x8_t __rev0_684; __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
int16x8_t __rev1_684; __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_684 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_684), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_684, __p2_684)))); \ + __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_684; \ }) #endif @@ -60715,128 +54787,128 @@ __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u32(__p0_789, __p1_789, __p2_789) __extension__ ({ \ - uint16x8_t __ret_789; \ - uint16x4_t __s0_789 = __p0_789; \ - uint32x4_t __s1_789 = __p1_789; \ - __ret_789 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_789), (uint16x4_t)(vqshrn_n_u32(__s1_789, __p2_789)))); \ - __ret_789; \ +#define vqshrn_high_n_u32(__p0_685, __p1_685, __p2_685) __extension__ ({ \ + uint16x8_t __ret_685; \ + uint16x4_t __s0_685 = __p0_685; \ + uint32x4_t __s1_685 = __p1_685; \ + __ret_685 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_685), (uint16x4_t)(vqshrn_n_u32(__s1_685, __p2_685)))); \ + __ret_685; \ }) #else -#define vqshrn_high_n_u32(__p0_790, __p1_790, __p2_790) __extension__ ({ \ - uint16x8_t __ret_790; \ - uint16x4_t __s0_790 = __p0_790; \ - uint32x4_t __s1_790 = __p1_790; \ - uint16x4_t __rev0_790; __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 3, 2, 1, 0); \ - uint32x4_t __rev1_790; __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 3, 2, 1, 0); \ - __ret_790 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_790), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_790, __p2_790)))); \ - __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_790; \ +#define vqshrn_high_n_u32(__p0_686, __p1_686, __p2_686) __extension__ ({ \ + uint16x8_t __ret_686; \ + uint16x4_t __s0_686 = __p0_686; \ + uint32x4_t __s1_686 = __p1_686; \ + uint16x4_t __rev0_686; __rev0_686 = __builtin_shufflevector(__s0_686, __s0_686, 3, 2, 1, 0); \ + uint32x4_t __rev1_686; __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 3, 2, 1, 0); \ + __ret_686 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_686), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_686, __p2_686)))); \ + __ret_686 = __builtin_shufflevector(__ret_686, __ret_686, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_686; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u64(__p0_791, __p1_791, __p2_791) __extension__ ({ \ - uint32x4_t __ret_791; \ - uint32x2_t __s0_791 = __p0_791; \ - uint64x2_t __s1_791 = __p1_791; \ - __ret_791 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_791), (uint32x2_t)(vqshrn_n_u64(__s1_791, __p2_791)))); \ - __ret_791; \ +#define vqshrn_high_n_u64(__p0_687, __p1_687, __p2_687) __extension__ ({ \ + uint32x4_t __ret_687; \ + uint32x2_t __s0_687 = __p0_687; \ + uint64x2_t __s1_687 = __p1_687; \ + __ret_687 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_687), (uint32x2_t)(vqshrn_n_u64(__s1_687, __p2_687)))); \ + __ret_687; \ }) #else -#define vqshrn_high_n_u64(__p0_792, __p1_792, __p2_792) __extension__ ({ \ - uint32x4_t __ret_792; \ - uint32x2_t __s0_792 = __p0_792; \ - uint64x2_t __s1_792 = __p1_792; \ - uint32x2_t __rev0_792; __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 1, 0); \ - uint64x2_t __rev1_792; __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 1, 0); \ - __ret_792 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_792), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_792, __p2_792)))); \ - __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 3, 2, 1, 
0); \ - __ret_792; \ +#define vqshrn_high_n_u64(__p0_688, __p1_688, __p2_688) __extension__ ({ \ + uint32x4_t __ret_688; \ + uint32x2_t __s0_688 = __p0_688; \ + uint64x2_t __s1_688 = __p1_688; \ + uint32x2_t __rev0_688; __rev0_688 = __builtin_shufflevector(__s0_688, __s0_688, 1, 0); \ + uint64x2_t __rev1_688; __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 1, 0); \ + __ret_688 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_688), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_688, __p2_688)))); \ + __ret_688 = __builtin_shufflevector(__ret_688, __ret_688, 3, 2, 1, 0); \ + __ret_688; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u16(__p0_793, __p1_793, __p2_793) __extension__ ({ \ - uint8x16_t __ret_793; \ - uint8x8_t __s0_793 = __p0_793; \ - uint16x8_t __s1_793 = __p1_793; \ - __ret_793 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_793), (uint8x8_t)(vqshrn_n_u16(__s1_793, __p2_793)))); \ - __ret_793; \ +#define vqshrn_high_n_u16(__p0_689, __p1_689, __p2_689) __extension__ ({ \ + uint8x16_t __ret_689; \ + uint8x8_t __s0_689 = __p0_689; \ + uint16x8_t __s1_689 = __p1_689; \ + __ret_689 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_689), (uint8x8_t)(vqshrn_n_u16(__s1_689, __p2_689)))); \ + __ret_689; \ }) #else -#define vqshrn_high_n_u16(__p0_794, __p1_794, __p2_794) __extension__ ({ \ - uint8x16_t __ret_794; \ - uint8x8_t __s0_794 = __p0_794; \ - uint16x8_t __s1_794 = __p1_794; \ - uint8x8_t __rev0_794; __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_794; __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_794 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_794), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_794, __p2_794)))); \ - __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_794; \ +#define vqshrn_high_n_u16(__p0_690, __p1_690, __p2_690) __extension__ ({ \ + uint8x16_t __ret_690; \ + uint8x8_t __s0_690 = __p0_690; \ + uint16x8_t __s1_690 = __p1_690; \ + uint8x8_t __rev0_690; __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_690; __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_690 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_690), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_690, __p2_690)))); \ + __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_690; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s32(__p0_795, __p1_795, __p2_795) __extension__ ({ \ - int16x8_t __ret_795; \ - int16x4_t __s0_795 = __p0_795; \ - int32x4_t __s1_795 = __p1_795; \ - __ret_795 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_795), (int16x4_t)(vqshrn_n_s32(__s1_795, __p2_795)))); \ - __ret_795; \ +#define vqshrn_high_n_s32(__p0_691, __p1_691, __p2_691) __extension__ ({ \ + int16x8_t __ret_691; \ + int16x4_t __s0_691 = __p0_691; \ + int32x4_t __s1_691 = __p1_691; \ + __ret_691 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_691), (int16x4_t)(vqshrn_n_s32(__s1_691, __p2_691)))); \ + __ret_691; \ }) #else -#define vqshrn_high_n_s32(__p0_796, __p1_796, __p2_796) __extension__ ({ \ - int16x8_t __ret_796; \ - int16x4_t __s0_796 = __p0_796; \ - int32x4_t __s1_796 = __p1_796; \ - int16x4_t __rev0_796; __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 3, 2, 1, 0); \ - int32x4_t __rev1_796; __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 
3, 2, 1, 0); \ - __ret_796 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_796), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_796, __p2_796)))); \ - __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_796; \ +#define vqshrn_high_n_s32(__p0_692, __p1_692, __p2_692) __extension__ ({ \ + int16x8_t __ret_692; \ + int16x4_t __s0_692 = __p0_692; \ + int32x4_t __s1_692 = __p1_692; \ + int16x4_t __rev0_692; __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \ + int32x4_t __rev1_692; __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \ + __ret_692 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_692), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_692, __p2_692)))); \ + __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_692; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s64(__p0_797, __p1_797, __p2_797) __extension__ ({ \ - int32x4_t __ret_797; \ - int32x2_t __s0_797 = __p0_797; \ - int64x2_t __s1_797 = __p1_797; \ - __ret_797 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_797), (int32x2_t)(vqshrn_n_s64(__s1_797, __p2_797)))); \ - __ret_797; \ +#define vqshrn_high_n_s64(__p0_693, __p1_693, __p2_693) __extension__ ({ \ + int32x4_t __ret_693; \ + int32x2_t __s0_693 = __p0_693; \ + int64x2_t __s1_693 = __p1_693; \ + __ret_693 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_693), (int32x2_t)(vqshrn_n_s64(__s1_693, __p2_693)))); \ + __ret_693; \ }) #else -#define vqshrn_high_n_s64(__p0_798, __p1_798, __p2_798) __extension__ ({ \ - int32x4_t __ret_798; \ - int32x2_t __s0_798 = __p0_798; \ - int64x2_t __s1_798 = __p1_798; \ - int32x2_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 1, 0); \ - int64x2_t __rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 1, 0); \ - __ret_798 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_798), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_798, __p2_798)))); \ - __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 3, 2, 1, 0); \ - __ret_798; \ +#define vqshrn_high_n_s64(__p0_694, __p1_694, __p2_694) __extension__ ({ \ + int32x4_t __ret_694; \ + int32x2_t __s0_694 = __p0_694; \ + int64x2_t __s1_694 = __p1_694; \ + int32x2_t __rev0_694; __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \ + int64x2_t __rev1_694; __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \ + __ret_694 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_694), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_694, __p2_694)))); \ + __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 3, 2, 1, 0); \ + __ret_694; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s16(__p0_799, __p1_799, __p2_799) __extension__ ({ \ - int8x16_t __ret_799; \ - int8x8_t __s0_799 = __p0_799; \ - int16x8_t __s1_799 = __p1_799; \ - __ret_799 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_799), (int8x8_t)(vqshrn_n_s16(__s1_799, __p2_799)))); \ - __ret_799; \ +#define vqshrn_high_n_s16(__p0_695, __p1_695, __p2_695) __extension__ ({ \ + int8x16_t __ret_695; \ + int8x8_t __s0_695 = __p0_695; \ + int16x8_t __s1_695 = __p1_695; \ + __ret_695 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_695), (int8x8_t)(vqshrn_n_s16(__s1_695, __p2_695)))); \ + __ret_695; \ }) #else -#define vqshrn_high_n_s16(__p0_800, __p1_800, __p2_800) __extension__ ({ \ - int8x16_t __ret_800; \ - int8x8_t __s0_800 = __p0_800; \ - int16x8_t __s1_800 = __p1_800; \ - int8x8_t __rev0_800; __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 7, 6, 5, 4, 3, 2, 1, 0); 
\ - int16x8_t __rev1_800; __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_800 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_800), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_800, __p2_800)))); \ - __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_800; \ +#define vqshrn_high_n_s16(__p0_696, __p1_696, __p2_696) __extension__ ({ \ + int8x16_t __ret_696; \ + int8x8_t __s0_696 = __p0_696; \ + int16x8_t __s1_696 = __p1_696; \ + int8x8_t __rev0_696; __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_696; __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_696 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_696), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_696, __p2_696)))); \ + __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_696; \ }) #endif @@ -60877,65 +54949,65 @@ __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s32(__p0_801, __p1_801, __p2_801) __extension__ ({ \ - int16x8_t __ret_801; \ - int16x4_t __s0_801 = __p0_801; \ - int32x4_t __s1_801 = __p1_801; \ - __ret_801 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_801), (int16x4_t)(vqshrun_n_s32(__s1_801, __p2_801)))); \ - __ret_801; \ +#define vqshrun_high_n_s32(__p0_697, __p1_697, __p2_697) __extension__ ({ \ + int16x8_t __ret_697; \ + int16x4_t __s0_697 = __p0_697; \ + int32x4_t __s1_697 = __p1_697; \ + __ret_697 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_697), (int16x4_t)(vqshrun_n_s32(__s1_697, __p2_697)))); \ + __ret_697; \ }) #else -#define vqshrun_high_n_s32(__p0_802, __p1_802, __p2_802) __extension__ ({ \ - int16x8_t __ret_802; \ - int16x4_t __s0_802 = __p0_802; \ - int32x4_t __s1_802 = __p1_802; \ - int16x4_t __rev0_802; __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \ - int32x4_t __rev1_802; __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 3, 2, 1, 0); \ - __ret_802 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_802), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_802, __p2_802)))); \ - __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_802; \ +#define vqshrun_high_n_s32(__p0_698, __p1_698, __p2_698) __extension__ ({ \ + int16x8_t __ret_698; \ + int16x4_t __s0_698 = __p0_698; \ + int32x4_t __s1_698 = __p1_698; \ + int16x4_t __rev0_698; __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \ + int32x4_t __rev1_698; __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 3, 2, 1, 0); \ + __ret_698 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_698), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_698, __p2_698)))); \ + __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_698; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s64(__p0_803, __p1_803, __p2_803) __extension__ ({ \ - int32x4_t __ret_803; \ - int32x2_t __s0_803 = __p0_803; \ - int64x2_t __s1_803 = __p1_803; \ - __ret_803 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_803), (int32x2_t)(vqshrun_n_s64(__s1_803, __p2_803)))); \ - __ret_803; \ +#define vqshrun_high_n_s64(__p0_699, __p1_699, __p2_699) __extension__ ({ \ + int32x4_t __ret_699; \ + int32x2_t __s0_699 = __p0_699; \ + int64x2_t __s1_699 = __p1_699; \ + __ret_699 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_699), 
(int32x2_t)(vqshrun_n_s64(__s1_699, __p2_699)))); \ + __ret_699; \ }) #else -#define vqshrun_high_n_s64(__p0_804, __p1_804, __p2_804) __extension__ ({ \ - int32x4_t __ret_804; \ - int32x2_t __s0_804 = __p0_804; \ - int64x2_t __s1_804 = __p1_804; \ - int32x2_t __rev0_804; __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 1, 0); \ - int64x2_t __rev1_804; __rev1_804 = __builtin_shufflevector(__s1_804, __s1_804, 1, 0); \ - __ret_804 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_804), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_804, __p2_804)))); \ - __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 3, 2, 1, 0); \ - __ret_804; \ +#define vqshrun_high_n_s64(__p0_700, __p1_700, __p2_700) __extension__ ({ \ + int32x4_t __ret_700; \ + int32x2_t __s0_700 = __p0_700; \ + int64x2_t __s1_700 = __p1_700; \ + int32x2_t __rev0_700; __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \ + int64x2_t __rev1_700; __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 1, 0); \ + __ret_700 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_700), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_700, __p2_700)))); \ + __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 3, 2, 1, 0); \ + __ret_700; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s16(__p0_805, __p1_805, __p2_805) __extension__ ({ \ - int8x16_t __ret_805; \ - int8x8_t __s0_805 = __p0_805; \ - int16x8_t __s1_805 = __p1_805; \ - __ret_805 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_805), (int8x8_t)(vqshrun_n_s16(__s1_805, __p2_805)))); \ - __ret_805; \ +#define vqshrun_high_n_s16(__p0_701, __p1_701, __p2_701) __extension__ ({ \ + int8x16_t __ret_701; \ + int8x8_t __s0_701 = __p0_701; \ + int16x8_t __s1_701 = __p1_701; \ + __ret_701 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_701), (int8x8_t)(vqshrun_n_s16(__s1_701, __p2_701)))); \ + __ret_701; \ }) #else -#define vqshrun_high_n_s16(__p0_806, __p1_806, __p2_806) __extension__ ({ \ - int8x16_t __ret_806; \ - int8x8_t __s0_806 = __p0_806; \ - int16x8_t __s1_806 = __p1_806; \ - int8x8_t __rev0_806; __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_806; __rev1_806 = __builtin_shufflevector(__s1_806, __s1_806, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_806 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_806), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_806, __p2_806)))); \ - __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_806; \ +#define vqshrun_high_n_s16(__p0_702, __p1_702, __p2_702) __extension__ ({ \ + int8x16_t __ret_702; \ + int8x8_t __s0_702 = __p0_702; \ + int16x8_t __s1_702 = __p1_702; \ + int8x8_t __rev0_702; __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_702; __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_702 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_702), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_702, __p2_702)))); \ + __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_702; \ }) #endif @@ -62222,6 +56294,1966 @@ __ai float32_t vrecpxs_f32(float32_t __p0) { __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); return __ret; } +__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + 
return __ret; +} +__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t 
__ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + 
return __ret; +} +__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai 
poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t 
vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t 
vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { 
+ float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + 
return __ret; +} +__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t 
vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; 
+} +__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + 
uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + 
int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t 
vreinterpret_f16_p64(poly64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t 
__ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} __ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) { uint64_t __ret; __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); @@ -62245,128 +58277,128 @@ __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u32(__p0_807, __p1_807, __p2_807) __extension__ ({ \ - uint16x8_t __ret_807; \ - uint16x4_t __s0_807 = __p0_807; \ - uint32x4_t __s1_807 = __p1_807; \ - __ret_807 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_807), (uint16x4_t)(vrshrn_n_u32(__s1_807, __p2_807)))); \ - __ret_807; \ +#define vrshrn_high_n_u32(__p0_703, __p1_703, __p2_703) __extension__ ({ \ + uint16x8_t __ret_703; \ + uint16x4_t __s0_703 = __p0_703; \ + uint32x4_t __s1_703 = __p1_703; \ + __ret_703 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_703), 
(uint16x4_t)(vrshrn_n_u32(__s1_703, __p2_703)))); \ + __ret_703; \ }) #else -#define vrshrn_high_n_u32(__p0_808, __p1_808, __p2_808) __extension__ ({ \ - uint16x8_t __ret_808; \ - uint16x4_t __s0_808 = __p0_808; \ - uint32x4_t __s1_808 = __p1_808; \ - uint16x4_t __rev0_808; __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 3, 2, 1, 0); \ - uint32x4_t __rev1_808; __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 3, 2, 1, 0); \ - __ret_808 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_808), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_808, __p2_808)))); \ - __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_808; \ +#define vrshrn_high_n_u32(__p0_704, __p1_704, __p2_704) __extension__ ({ \ + uint16x8_t __ret_704; \ + uint16x4_t __s0_704 = __p0_704; \ + uint32x4_t __s1_704 = __p1_704; \ + uint16x4_t __rev0_704; __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 3, 2, 1, 0); \ + uint32x4_t __rev1_704; __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \ + __ret_704 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_704), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_704, __p2_704)))); \ + __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_704; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u64(__p0_809, __p1_809, __p2_809) __extension__ ({ \ - uint32x4_t __ret_809; \ - uint32x2_t __s0_809 = __p0_809; \ - uint64x2_t __s1_809 = __p1_809; \ - __ret_809 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_809), (uint32x2_t)(vrshrn_n_u64(__s1_809, __p2_809)))); \ - __ret_809; \ +#define vrshrn_high_n_u64(__p0_705, __p1_705, __p2_705) __extension__ ({ \ + uint32x4_t __ret_705; \ + uint32x2_t __s0_705 = __p0_705; \ + uint64x2_t __s1_705 = __p1_705; \ + __ret_705 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_705), (uint32x2_t)(vrshrn_n_u64(__s1_705, __p2_705)))); \ + __ret_705; \ }) #else -#define vrshrn_high_n_u64(__p0_810, __p1_810, __p2_810) __extension__ ({ \ - uint32x4_t __ret_810; \ - uint32x2_t __s0_810 = __p0_810; \ - uint64x2_t __s1_810 = __p1_810; \ - uint32x2_t __rev0_810; __rev0_810 = __builtin_shufflevector(__s0_810, __s0_810, 1, 0); \ - uint64x2_t __rev1_810; __rev1_810 = __builtin_shufflevector(__s1_810, __s1_810, 1, 0); \ - __ret_810 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_810), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_810, __p2_810)))); \ - __ret_810 = __builtin_shufflevector(__ret_810, __ret_810, 3, 2, 1, 0); \ - __ret_810; \ +#define vrshrn_high_n_u64(__p0_706, __p1_706, __p2_706) __extension__ ({ \ + uint32x4_t __ret_706; \ + uint32x2_t __s0_706 = __p0_706; \ + uint64x2_t __s1_706 = __p1_706; \ + uint32x2_t __rev0_706; __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \ + uint64x2_t __rev1_706; __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 1, 0); \ + __ret_706 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_706), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_706, __p2_706)))); \ + __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \ + __ret_706; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u16(__p0_811, __p1_811, __p2_811) __extension__ ({ \ - uint8x16_t __ret_811; \ - uint8x8_t __s0_811 = __p0_811; \ - uint16x8_t __s1_811 = __p1_811; \ - __ret_811 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_811), (uint8x8_t)(vrshrn_n_u16(__s1_811, __p2_811)))); \ - __ret_811; \ +#define vrshrn_high_n_u16(__p0_707, __p1_707, __p2_707) __extension__ ({ \ + uint8x16_t __ret_707; \ + 
uint8x8_t __s0_707 = __p0_707; \ + uint16x8_t __s1_707 = __p1_707; \ + __ret_707 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_707), (uint8x8_t)(vrshrn_n_u16(__s1_707, __p2_707)))); \ + __ret_707; \ }) #else -#define vrshrn_high_n_u16(__p0_812, __p1_812, __p2_812) __extension__ ({ \ - uint8x16_t __ret_812; \ - uint8x8_t __s0_812 = __p0_812; \ - uint16x8_t __s1_812 = __p1_812; \ - uint8x8_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_812; __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_812 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_812), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_812, __p2_812)))); \ - __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_812; \ +#define vrshrn_high_n_u16(__p0_708, __p1_708, __p2_708) __extension__ ({ \ + uint8x16_t __ret_708; \ + uint8x8_t __s0_708 = __p0_708; \ + uint16x8_t __s1_708 = __p1_708; \ + uint8x8_t __rev0_708; __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_708; __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_708 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_708), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_708, __p2_708)))); \ + __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_708; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s32(__p0_813, __p1_813, __p2_813) __extension__ ({ \ - int16x8_t __ret_813; \ - int16x4_t __s0_813 = __p0_813; \ - int32x4_t __s1_813 = __p1_813; \ - __ret_813 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_813), (int16x4_t)(vrshrn_n_s32(__s1_813, __p2_813)))); \ - __ret_813; \ +#define vrshrn_high_n_s32(__p0_709, __p1_709, __p2_709) __extension__ ({ \ + int16x8_t __ret_709; \ + int16x4_t __s0_709 = __p0_709; \ + int32x4_t __s1_709 = __p1_709; \ + __ret_709 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_709), (int16x4_t)(vrshrn_n_s32(__s1_709, __p2_709)))); \ + __ret_709; \ }) #else -#define vrshrn_high_n_s32(__p0_814, __p1_814, __p2_814) __extension__ ({ \ - int16x8_t __ret_814; \ - int16x4_t __s0_814 = __p0_814; \ - int32x4_t __s1_814 = __p1_814; \ - int16x4_t __rev0_814; __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 3, 2, 1, 0); \ - int32x4_t __rev1_814; __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 3, 2, 1, 0); \ - __ret_814 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_814), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_814, __p2_814)))); \ - __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_814; \ +#define vrshrn_high_n_s32(__p0_710, __p1_710, __p2_710) __extension__ ({ \ + int16x8_t __ret_710; \ + int16x4_t __s0_710 = __p0_710; \ + int32x4_t __s1_710 = __p1_710; \ + int16x4_t __rev0_710; __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \ + int32x4_t __rev1_710; __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 3, 2, 1, 0); \ + __ret_710 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_710), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_710, __p2_710)))); \ + __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_710; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s64(__p0_815, __p1_815, __p2_815) __extension__ ({ \ - int32x4_t __ret_815; \ - int32x2_t __s0_815 = __p0_815; \ - int64x2_t __s1_815 
= __p1_815; \ - __ret_815 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_815), (int32x2_t)(vrshrn_n_s64(__s1_815, __p2_815)))); \ - __ret_815; \ +#define vrshrn_high_n_s64(__p0_711, __p1_711, __p2_711) __extension__ ({ \ + int32x4_t __ret_711; \ + int32x2_t __s0_711 = __p0_711; \ + int64x2_t __s1_711 = __p1_711; \ + __ret_711 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_711), (int32x2_t)(vrshrn_n_s64(__s1_711, __p2_711)))); \ + __ret_711; \ }) #else -#define vrshrn_high_n_s64(__p0_816, __p1_816, __p2_816) __extension__ ({ \ - int32x4_t __ret_816; \ - int32x2_t __s0_816 = __p0_816; \ - int64x2_t __s1_816 = __p1_816; \ - int32x2_t __rev0_816; __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \ - int64x2_t __rev1_816; __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 1, 0); \ - __ret_816 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_816), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_816, __p2_816)))); \ - __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 3, 2, 1, 0); \ - __ret_816; \ +#define vrshrn_high_n_s64(__p0_712, __p1_712, __p2_712) __extension__ ({ \ + int32x4_t __ret_712; \ + int32x2_t __s0_712 = __p0_712; \ + int64x2_t __s1_712 = __p1_712; \ + int32x2_t __rev0_712; __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \ + int64x2_t __rev1_712; __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 1, 0); \ + __ret_712 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_712), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_712, __p2_712)))); \ + __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 3, 2, 1, 0); \ + __ret_712; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s16(__p0_817, __p1_817, __p2_817) __extension__ ({ \ - int8x16_t __ret_817; \ - int8x8_t __s0_817 = __p0_817; \ - int16x8_t __s1_817 = __p1_817; \ - __ret_817 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_817), (int8x8_t)(vrshrn_n_s16(__s1_817, __p2_817)))); \ - __ret_817; \ +#define vrshrn_high_n_s16(__p0_713, __p1_713, __p2_713) __extension__ ({ \ + int8x16_t __ret_713; \ + int8x8_t __s0_713 = __p0_713; \ + int16x8_t __s1_713 = __p1_713; \ + __ret_713 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_713), (int8x8_t)(vrshrn_n_s16(__s1_713, __p2_713)))); \ + __ret_713; \ }) #else -#define vrshrn_high_n_s16(__p0_818, __p1_818, __p2_818) __extension__ ({ \ - int8x16_t __ret_818; \ - int8x8_t __s0_818 = __p0_818; \ - int16x8_t __s1_818 = __p1_818; \ - int8x8_t __rev0_818; __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_818; __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_818 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_818), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_818, __p2_818)))); \ - __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_818; \ +#define vrshrn_high_n_s16(__p0_714, __p1_714, __p2_714) __extension__ ({ \ + int8x16_t __ret_714; \ + int8x8_t __s0_714 = __p0_714; \ + int16x8_t __s1_714 = __p1_714; \ + int8x8_t __rev0_714; __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_714; __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_714 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_714), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_714, __p2_714)))); \ + __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_714; \ }) #endif @@ 
-62646,110 +58678,110 @@ __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u8(__p0_819, __p1_819) __extension__ ({ \ - uint16x8_t __ret_819; \ - uint8x16_t __s0_819 = __p0_819; \ - __ret_819 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_819), __p1_819)); \ - __ret_819; \ +#define vshll_high_n_u8(__p0_715, __p1_715) __extension__ ({ \ + uint16x8_t __ret_715; \ + uint8x16_t __s0_715 = __p0_715; \ + __ret_715 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_715), __p1_715)); \ + __ret_715; \ }) #else -#define vshll_high_n_u8(__p0_820, __p1_820) __extension__ ({ \ - uint16x8_t __ret_820; \ - uint8x16_t __s0_820 = __p0_820; \ - uint8x16_t __rev0_820; __rev0_820 = __builtin_shufflevector(__s0_820, __s0_820, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_820 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_820), __p1_820)); \ - __ret_820 = __builtin_shufflevector(__ret_820, __ret_820, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_820; \ +#define vshll_high_n_u8(__p0_716, __p1_716) __extension__ ({ \ + uint16x8_t __ret_716; \ + uint8x16_t __s0_716 = __p0_716; \ + uint8x16_t __rev0_716; __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_716 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_716), __p1_716)); \ + __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_716; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u32(__p0_821, __p1_821) __extension__ ({ \ - uint64x2_t __ret_821; \ - uint32x4_t __s0_821 = __p0_821; \ - __ret_821 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_821), __p1_821)); \ - __ret_821; \ +#define vshll_high_n_u32(__p0_717, __p1_717) __extension__ ({ \ + uint64x2_t __ret_717; \ + uint32x4_t __s0_717 = __p0_717; \ + __ret_717 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_717), __p1_717)); \ + __ret_717; \ }) #else -#define vshll_high_n_u32(__p0_822, __p1_822) __extension__ ({ \ - uint64x2_t __ret_822; \ - uint32x4_t __s0_822 = __p0_822; \ - uint32x4_t __rev0_822; __rev0_822 = __builtin_shufflevector(__s0_822, __s0_822, 3, 2, 1, 0); \ - __ret_822 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_822), __p1_822)); \ - __ret_822 = __builtin_shufflevector(__ret_822, __ret_822, 1, 0); \ - __ret_822; \ +#define vshll_high_n_u32(__p0_718, __p1_718) __extension__ ({ \ + uint64x2_t __ret_718; \ + uint32x4_t __s0_718 = __p0_718; \ + uint32x4_t __rev0_718; __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \ + __ret_718 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_718), __p1_718)); \ + __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 1, 0); \ + __ret_718; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u16(__p0_823, __p1_823) __extension__ ({ \ - uint32x4_t __ret_823; \ - uint16x8_t __s0_823 = __p0_823; \ - __ret_823 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_823), __p1_823)); \ - __ret_823; \ +#define vshll_high_n_u16(__p0_719, __p1_719) __extension__ ({ \ + uint32x4_t __ret_719; \ + uint16x8_t __s0_719 = __p0_719; \ + __ret_719 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_719), __p1_719)); \ + __ret_719; \ }) #else -#define vshll_high_n_u16(__p0_824, __p1_824) __extension__ ({ \ - uint32x4_t __ret_824; \ - uint16x8_t __s0_824 = __p0_824; \ - uint16x8_t __rev0_824; __rev0_824 = __builtin_shufflevector(__s0_824, __s0_824, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_824 = 
(uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_824), __p1_824)); \ - __ret_824 = __builtin_shufflevector(__ret_824, __ret_824, 3, 2, 1, 0); \ - __ret_824; \ +#define vshll_high_n_u16(__p0_720, __p1_720) __extension__ ({ \ + uint32x4_t __ret_720; \ + uint16x8_t __s0_720 = __p0_720; \ + uint16x8_t __rev0_720; __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_720 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_720), __p1_720)); \ + __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 3, 2, 1, 0); \ + __ret_720; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s8(__p0_825, __p1_825) __extension__ ({ \ - int16x8_t __ret_825; \ - int8x16_t __s0_825 = __p0_825; \ - __ret_825 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_825), __p1_825)); \ - __ret_825; \ +#define vshll_high_n_s8(__p0_721, __p1_721) __extension__ ({ \ + int16x8_t __ret_721; \ + int8x16_t __s0_721 = __p0_721; \ + __ret_721 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_721), __p1_721)); \ + __ret_721; \ }) #else -#define vshll_high_n_s8(__p0_826, __p1_826) __extension__ ({ \ - int16x8_t __ret_826; \ - int8x16_t __s0_826 = __p0_826; \ - int8x16_t __rev0_826; __rev0_826 = __builtin_shufflevector(__s0_826, __s0_826, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_826 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_826), __p1_826)); \ - __ret_826 = __builtin_shufflevector(__ret_826, __ret_826, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_826; \ +#define vshll_high_n_s8(__p0_722, __p1_722) __extension__ ({ \ + int16x8_t __ret_722; \ + int8x16_t __s0_722 = __p0_722; \ + int8x16_t __rev0_722; __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_722 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_722), __p1_722)); \ + __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_722; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s32(__p0_827, __p1_827) __extension__ ({ \ - int64x2_t __ret_827; \ - int32x4_t __s0_827 = __p0_827; \ - __ret_827 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_827), __p1_827)); \ - __ret_827; \ +#define vshll_high_n_s32(__p0_723, __p1_723) __extension__ ({ \ + int64x2_t __ret_723; \ + int32x4_t __s0_723 = __p0_723; \ + __ret_723 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_723), __p1_723)); \ + __ret_723; \ }) #else -#define vshll_high_n_s32(__p0_828, __p1_828) __extension__ ({ \ - int64x2_t __ret_828; \ - int32x4_t __s0_828 = __p0_828; \ - int32x4_t __rev0_828; __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 3, 2, 1, 0); \ - __ret_828 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_828), __p1_828)); \ - __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 1, 0); \ - __ret_828; \ +#define vshll_high_n_s32(__p0_724, __p1_724) __extension__ ({ \ + int64x2_t __ret_724; \ + int32x4_t __s0_724 = __p0_724; \ + int32x4_t __rev0_724; __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 3, 2, 1, 0); \ + __ret_724 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_724), __p1_724)); \ + __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \ + __ret_724; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s16(__p0_829, __p1_829) __extension__ ({ \ - int32x4_t __ret_829; \ - int16x8_t __s0_829 = __p0_829; \ - __ret_829 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_829), __p1_829)); \ - __ret_829; \ +#define 
vshll_high_n_s16(__p0_725, __p1_725) __extension__ ({ \ + int32x4_t __ret_725; \ + int16x8_t __s0_725 = __p0_725; \ + __ret_725 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_725), __p1_725)); \ + __ret_725; \ }) #else -#define vshll_high_n_s16(__p0_830, __p1_830) __extension__ ({ \ - int32x4_t __ret_830; \ - int16x8_t __s0_830 = __p0_830; \ - int16x8_t __rev0_830; __rev0_830 = __builtin_shufflevector(__s0_830, __s0_830, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_830 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_830), __p1_830)); \ - __ret_830 = __builtin_shufflevector(__ret_830, __ret_830, 3, 2, 1, 0); \ - __ret_830; \ +#define vshll_high_n_s16(__p0_726, __p1_726) __extension__ ({ \ + int32x4_t __ret_726; \ + int16x8_t __s0_726 = __p0_726; \ + int16x8_t __rev0_726; __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_726 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_726), __p1_726)); \ + __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \ + __ret_726; \ }) #endif @@ -62766,128 +58798,128 @@ __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u32(__p0_831, __p1_831, __p2_831) __extension__ ({ \ - uint16x8_t __ret_831; \ - uint16x4_t __s0_831 = __p0_831; \ - uint32x4_t __s1_831 = __p1_831; \ - __ret_831 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_831), (uint16x4_t)(vshrn_n_u32(__s1_831, __p2_831)))); \ - __ret_831; \ +#define vshrn_high_n_u32(__p0_727, __p1_727, __p2_727) __extension__ ({ \ + uint16x8_t __ret_727; \ + uint16x4_t __s0_727 = __p0_727; \ + uint32x4_t __s1_727 = __p1_727; \ + __ret_727 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_727), (uint16x4_t)(vshrn_n_u32(__s1_727, __p2_727)))); \ + __ret_727; \ }) #else -#define vshrn_high_n_u32(__p0_832, __p1_832, __p2_832) __extension__ ({ \ - uint16x8_t __ret_832; \ - uint16x4_t __s0_832 = __p0_832; \ - uint32x4_t __s1_832 = __p1_832; \ - uint16x4_t __rev0_832; __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 3, 2, 1, 0); \ - uint32x4_t __rev1_832; __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 3, 2, 1, 0); \ - __ret_832 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_832), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_832, __p2_832)))); \ - __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_832; \ +#define vshrn_high_n_u32(__p0_728, __p1_728, __p2_728) __extension__ ({ \ + uint16x8_t __ret_728; \ + uint16x4_t __s0_728 = __p0_728; \ + uint32x4_t __s1_728 = __p1_728; \ + uint16x4_t __rev0_728; __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \ + uint32x4_t __rev1_728; __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 3, 2, 1, 0); \ + __ret_728 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_728), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_728, __p2_728)))); \ + __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_728; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u64(__p0_833, __p1_833, __p2_833) __extension__ ({ \ - uint32x4_t __ret_833; \ - uint32x2_t __s0_833 = __p0_833; \ - uint64x2_t __s1_833 = __p1_833; \ - __ret_833 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_833), (uint32x2_t)(vshrn_n_u64(__s1_833, __p2_833)))); \ - __ret_833; \ +#define vshrn_high_n_u64(__p0_729, __p1_729, __p2_729) __extension__ ({ \ + uint32x4_t __ret_729; \ + uint32x2_t __s0_729 = __p0_729; \ + uint64x2_t __s1_729 = __p1_729; \ + __ret_729 = 
(uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_729), (uint32x2_t)(vshrn_n_u64(__s1_729, __p2_729)))); \ + __ret_729; \ }) #else -#define vshrn_high_n_u64(__p0_834, __p1_834, __p2_834) __extension__ ({ \ - uint32x4_t __ret_834; \ - uint32x2_t __s0_834 = __p0_834; \ - uint64x2_t __s1_834 = __p1_834; \ - uint32x2_t __rev0_834; __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \ - uint64x2_t __rev1_834; __rev1_834 = __builtin_shufflevector(__s1_834, __s1_834, 1, 0); \ - __ret_834 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_834), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_834, __p2_834)))); \ - __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 3, 2, 1, 0); \ - __ret_834; \ +#define vshrn_high_n_u64(__p0_730, __p1_730, __p2_730) __extension__ ({ \ + uint32x4_t __ret_730; \ + uint32x2_t __s0_730 = __p0_730; \ + uint64x2_t __s1_730 = __p1_730; \ + uint32x2_t __rev0_730; __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 1, 0); \ + uint64x2_t __rev1_730; __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 1, 0); \ + __ret_730 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_730), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_730, __p2_730)))); \ + __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \ + __ret_730; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u16(__p0_835, __p1_835, __p2_835) __extension__ ({ \ - uint8x16_t __ret_835; \ - uint8x8_t __s0_835 = __p0_835; \ - uint16x8_t __s1_835 = __p1_835; \ - __ret_835 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_835), (uint8x8_t)(vshrn_n_u16(__s1_835, __p2_835)))); \ - __ret_835; \ +#define vshrn_high_n_u16(__p0_731, __p1_731, __p2_731) __extension__ ({ \ + uint8x16_t __ret_731; \ + uint8x8_t __s0_731 = __p0_731; \ + uint16x8_t __s1_731 = __p1_731; \ + __ret_731 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_731), (uint8x8_t)(vshrn_n_u16(__s1_731, __p2_731)))); \ + __ret_731; \ }) #else -#define vshrn_high_n_u16(__p0_836, __p1_836, __p2_836) __extension__ ({ \ - uint8x16_t __ret_836; \ - uint8x8_t __s0_836 = __p0_836; \ - uint16x8_t __s1_836 = __p1_836; \ - uint8x8_t __rev0_836; __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_836; __rev1_836 = __builtin_shufflevector(__s1_836, __s1_836, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_836 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_836), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_836, __p2_836)))); \ - __ret_836 = __builtin_shufflevector(__ret_836, __ret_836, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_836; \ +#define vshrn_high_n_u16(__p0_732, __p1_732, __p2_732) __extension__ ({ \ + uint8x16_t __ret_732; \ + uint8x8_t __s0_732 = __p0_732; \ + uint16x8_t __s1_732 = __p1_732; \ + uint8x8_t __rev0_732; __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_732; __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_732 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_732), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_732, __p2_732)))); \ + __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_732; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s32(__p0_837, __p1_837, __p2_837) __extension__ ({ \ - int16x8_t __ret_837; \ - int16x4_t __s0_837 = __p0_837; \ - int32x4_t __s1_837 = __p1_837; \ - __ret_837 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_837), (int16x4_t)(vshrn_n_s32(__s1_837, 
__p2_837)))); \ - __ret_837; \ +#define vshrn_high_n_s32(__p0_733, __p1_733, __p2_733) __extension__ ({ \ + int16x8_t __ret_733; \ + int16x4_t __s0_733 = __p0_733; \ + int32x4_t __s1_733 = __p1_733; \ + __ret_733 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_733), (int16x4_t)(vshrn_n_s32(__s1_733, __p2_733)))); \ + __ret_733; \ }) #else -#define vshrn_high_n_s32(__p0_838, __p1_838, __p2_838) __extension__ ({ \ - int16x8_t __ret_838; \ - int16x4_t __s0_838 = __p0_838; \ - int32x4_t __s1_838 = __p1_838; \ - int16x4_t __rev0_838; __rev0_838 = __builtin_shufflevector(__s0_838, __s0_838, 3, 2, 1, 0); \ - int32x4_t __rev1_838; __rev1_838 = __builtin_shufflevector(__s1_838, __s1_838, 3, 2, 1, 0); \ - __ret_838 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_838), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_838, __p2_838)))); \ - __ret_838 = __builtin_shufflevector(__ret_838, __ret_838, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_838; \ +#define vshrn_high_n_s32(__p0_734, __p1_734, __p2_734) __extension__ ({ \ + int16x8_t __ret_734; \ + int16x4_t __s0_734 = __p0_734; \ + int32x4_t __s1_734 = __p1_734; \ + int16x4_t __rev0_734; __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 3, 2, 1, 0); \ + int32x4_t __rev1_734; __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \ + __ret_734 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_734), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_734, __p2_734)))); \ + __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_734; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s64(__p0_839, __p1_839, __p2_839) __extension__ ({ \ - int32x4_t __ret_839; \ - int32x2_t __s0_839 = __p0_839; \ - int64x2_t __s1_839 = __p1_839; \ - __ret_839 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_839), (int32x2_t)(vshrn_n_s64(__s1_839, __p2_839)))); \ - __ret_839; \ +#define vshrn_high_n_s64(__p0_735, __p1_735, __p2_735) __extension__ ({ \ + int32x4_t __ret_735; \ + int32x2_t __s0_735 = __p0_735; \ + int64x2_t __s1_735 = __p1_735; \ + __ret_735 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_735), (int32x2_t)(vshrn_n_s64(__s1_735, __p2_735)))); \ + __ret_735; \ }) #else -#define vshrn_high_n_s64(__p0_840, __p1_840, __p2_840) __extension__ ({ \ - int32x4_t __ret_840; \ - int32x2_t __s0_840 = __p0_840; \ - int64x2_t __s1_840 = __p1_840; \ - int32x2_t __rev0_840; __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 1, 0); \ - int64x2_t __rev1_840; __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, 1, 0); \ - __ret_840 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_840), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_840, __p2_840)))); \ - __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 3, 2, 1, 0); \ - __ret_840; \ +#define vshrn_high_n_s64(__p0_736, __p1_736, __p2_736) __extension__ ({ \ + int32x4_t __ret_736; \ + int32x2_t __s0_736 = __p0_736; \ + int64x2_t __s1_736 = __p1_736; \ + int32x2_t __rev0_736; __rev0_736 = __builtin_shufflevector(__s0_736, __s0_736, 1, 0); \ + int64x2_t __rev1_736; __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \ + __ret_736 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_736), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_736, __p2_736)))); \ + __ret_736 = __builtin_shufflevector(__ret_736, __ret_736, 3, 2, 1, 0); \ + __ret_736; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s16(__p0_841, __p1_841, __p2_841) __extension__ ({ \ - int8x16_t __ret_841; \ - int8x8_t __s0_841 = __p0_841; \ - int16x8_t __s1_841 = __p1_841; \ - __ret_841 = 
(int8x16_t)(vcombine_s8((int8x8_t)(__s0_841), (int8x8_t)(vshrn_n_s16(__s1_841, __p2_841)))); \ - __ret_841; \ +#define vshrn_high_n_s16(__p0_737, __p1_737, __p2_737) __extension__ ({ \ + int8x16_t __ret_737; \ + int8x8_t __s0_737 = __p0_737; \ + int16x8_t __s1_737 = __p1_737; \ + __ret_737 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_737), (int8x8_t)(vshrn_n_s16(__s1_737, __p2_737)))); \ + __ret_737; \ }) #else -#define vshrn_high_n_s16(__p0_842, __p1_842, __p2_842) __extension__ ({ \ - int8x16_t __ret_842; \ - int8x8_t __s0_842 = __p0_842; \ - int16x8_t __s1_842 = __p1_842; \ - int8x8_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_842 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_842), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_842, __p2_842)))); \ - __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_842; \ +#define vshrn_high_n_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \ + int8x16_t __ret_738; \ + int8x8_t __s0_738 = __p0_738; \ + int16x8_t __s1_738 = __p1_738; \ + int8x8_t __rev0_738; __rev0_738 = __builtin_shufflevector(__s0_738, __s0_738, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_738; __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_738 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_738), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_738, __p2_738)))); \ + __ret_738 = __builtin_shufflevector(__ret_738, __ret_738, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_738; \ }) #endif @@ -64322,58 +60354,6 @@ __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { } #endif -#ifdef __LITTLE_ENDIAN__ -#define vsudotq_laneq_s32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ - int32x4_t __ret_843; \ - int32x4_t __s0_843 = __p0_843; \ - int8x16_t __s1_843 = __p1_843; \ - uint8x16_t __s2_843 = __p2_843; \ -uint8x16_t __reint_843 = __s2_843; \ - __ret_843 = vusdotq_s32(__s0_843, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_843, __p3_843)), __s1_843); \ - __ret_843; \ -}) -#else -#define vsudotq_laneq_s32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ - int32x4_t __ret_844; \ - int32x4_t __s0_844 = __p0_844; \ - int8x16_t __s1_844 = __p1_844; \ - uint8x16_t __s2_844 = __p2_844; \ - int32x4_t __rev0_844; __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 3, 2, 1, 0); \ - int8x16_t __rev1_844; __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_844 = __rev2_844; \ - __ret_844 = __noswap_vusdotq_s32(__rev0_844, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_844, __p3_844)), __rev1_844); \ - __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 3, 2, 1, 0); \ - __ret_844; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsudot_laneq_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ - int32x2_t __ret_845; \ - int32x2_t __s0_845 = __p0_845; \ - int8x8_t __s1_845 = __p1_845; \ - uint8x16_t __s2_845 = __p2_845; \ -uint8x16_t __reint_845 = __s2_845; \ - __ret_845 = vusdot_s32(__s0_845, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_845, __p3_845)), __s1_845); \ - __ret_845; \ -}) -#else -#define 
vsudot_laneq_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ - int32x2_t __ret_846; \ - int32x2_t __s0_846 = __p0_846; \ - int8x8_t __s1_846 = __p1_846; \ - uint8x16_t __s2_846 = __p2_846; \ - int32x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ - int8x8_t __rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_846 = __rev2_846; \ - __ret_846 = __noswap_vusdot_s32(__rev0_846, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_846, __p3_846)), __rev1_846); \ - __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ - __ret_846; \ -}) -#endif - #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; @@ -65342,58 +61322,6 @@ __ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { } #endif -#ifdef __LITTLE_ENDIAN__ -#define vusdotq_laneq_s32(__p0_847, __p1_847, __p2_847, __p3_847) __extension__ ({ \ - int32x4_t __ret_847; \ - int32x4_t __s0_847 = __p0_847; \ - uint8x16_t __s1_847 = __p1_847; \ - int8x16_t __s2_847 = __p2_847; \ -int8x16_t __reint_847 = __s2_847; \ - __ret_847 = vusdotq_s32(__s0_847, __s1_847, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_847, __p3_847))); \ - __ret_847; \ -}) -#else -#define vusdotq_laneq_s32(__p0_848, __p1_848, __p2_848, __p3_848) __extension__ ({ \ - int32x4_t __ret_848; \ - int32x4_t __s0_848 = __p0_848; \ - uint8x16_t __s1_848 = __p1_848; \ - int8x16_t __s2_848 = __p2_848; \ - int32x4_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \ - uint8x16_t __rev1_848; __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_848; __rev2_848 = __builtin_shufflevector(__s2_848, __s2_848, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_848 = __rev2_848; \ - __ret_848 = __noswap_vusdotq_s32(__rev0_848, __rev1_848, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_848, __p3_848))); \ - __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 3, 2, 1, 0); \ - __ret_848; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vusdot_laneq_s32(__p0_849, __p1_849, __p2_849, __p3_849) __extension__ ({ \ - int32x2_t __ret_849; \ - int32x2_t __s0_849 = __p0_849; \ - uint8x8_t __s1_849 = __p1_849; \ - int8x16_t __s2_849 = __p2_849; \ -int8x16_t __reint_849 = __s2_849; \ - __ret_849 = vusdot_s32(__s0_849, __s1_849, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_849, __p3_849))); \ - __ret_849; \ -}) -#else -#define vusdot_laneq_s32(__p0_850, __p1_850, __p2_850, __p3_850) __extension__ ({ \ - int32x2_t __ret_850; \ - int32x2_t __s0_850 = __p0_850; \ - uint8x8_t __s1_850 = __p1_850; \ - int8x16_t __s2_850 = __p2_850; \ - int32x2_t __rev0_850; __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 1, 0); \ - uint8x8_t __rev1_850; __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_850; __rev2_850 = __builtin_shufflevector(__s2_850, __s2_850, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_850 = __rev2_850; \ - __ret_850 = __noswap_vusdot_s32(__rev0_850, __rev1_850, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_850, __p3_850))); \ - __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 1, 0); \ - __ret_850; \ -}) 
-#endif - #ifdef __LITTLE_ENDIAN__ __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; @@ -66890,6 +62818,3916 @@ __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { } #endif +__ai __attribute__((target("aes"))) poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); + return __ret; +} +#else +__ai __attribute__((target("aes"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__rev0, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_bf16(__p0_739, __p1_739, __p2_739, __p3_739) __extension__ ({ \ + bfloat16x8_t __ret_739; \ + bfloat16x8_t __s0_739 = __p0_739; \ + bfloat16x4_t __s2_739 = __p2_739; \ + __ret_739 = vsetq_lane_bf16(vget_lane_bf16(__s2_739, __p3_739), __s0_739, __p1_739); \ + __ret_739; \ +}) +#else +#define vcopyq_lane_bf16(__p0_740, __p1_740, __p2_740, __p3_740) __extension__ ({ \ + bfloat16x8_t __ret_740; \ + bfloat16x8_t __s0_740 = __p0_740; \ + bfloat16x4_t __s2_740 = __p2_740; \ + bfloat16x8_t __rev0_740; __rev0_740 = __builtin_shufflevector(__s0_740, __s0_740, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_740; __rev2_740 = __builtin_shufflevector(__s2_740, __s2_740, 3, 2, 1, 0); \ + __ret_740 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_740, __p3_740), __rev0_740, __p1_740); \ + __ret_740 = __builtin_shufflevector(__ret_740, __ret_740, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_740; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_bf16(__p0_741, __p1_741, __p2_741, __p3_741) __extension__ ({ \ + bfloat16x4_t __ret_741; \ + bfloat16x4_t __s0_741 = __p0_741; \ + bfloat16x4_t __s2_741 = __p2_741; \ + __ret_741 = vset_lane_bf16(vget_lane_bf16(__s2_741, __p3_741), __s0_741, __p1_741); \ + __ret_741; \ +}) +#else +#define vcopy_lane_bf16(__p0_742, __p1_742, __p2_742, __p3_742) __extension__ ({ \ + bfloat16x4_t __ret_742; \ + bfloat16x4_t __s0_742 = __p0_742; \ + bfloat16x4_t __s2_742 = __p2_742; \ + bfloat16x4_t __rev0_742; __rev0_742 = __builtin_shufflevector(__s0_742, __s0_742, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_742; __rev2_742 = 
__builtin_shufflevector(__s2_742, __s2_742, 3, 2, 1, 0); \ + __ret_742 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_742, __p3_742), __rev0_742, __p1_742); \ + __ret_742 = __builtin_shufflevector(__ret_742, __ret_742, 3, 2, 1, 0); \ + __ret_742; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_bf16(__p0_743, __p1_743, __p2_743, __p3_743) __extension__ ({ \ + bfloat16x8_t __ret_743; \ + bfloat16x8_t __s0_743 = __p0_743; \ + bfloat16x8_t __s2_743 = __p2_743; \ + __ret_743 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_743, __p3_743), __s0_743, __p1_743); \ + __ret_743; \ +}) +#else +#define vcopyq_laneq_bf16(__p0_744, __p1_744, __p2_744, __p3_744) __extension__ ({ \ + bfloat16x8_t __ret_744; \ + bfloat16x8_t __s0_744 = __p0_744; \ + bfloat16x8_t __s2_744 = __p2_744; \ + bfloat16x8_t __rev0_744; __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_744; __rev2_744 = __builtin_shufflevector(__s2_744, __s2_744, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_744 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_744, __p3_744), __rev0_744, __p1_744); \ + __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_744; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_bf16(__p0_745, __p1_745, __p2_745, __p3_745) __extension__ ({ \ + bfloat16x4_t __ret_745; \ + bfloat16x4_t __s0_745 = __p0_745; \ + bfloat16x8_t __s2_745 = __p2_745; \ + __ret_745 = vset_lane_bf16(vgetq_lane_bf16(__s2_745, __p3_745), __s0_745, __p1_745); \ + __ret_745; \ +}) +#else +#define vcopy_laneq_bf16(__p0_746, __p1_746, __p2_746, __p3_746) __extension__ ({ \ + bfloat16x4_t __ret_746; \ + bfloat16x4_t __s0_746 = __p0_746; \ + bfloat16x8_t __s2_746 = __p2_746; \ + bfloat16x4_t __rev0_746; __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_746; __rev2_746 = __builtin_shufflevector(__s2_746, __s2_746, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_746 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_746, __p3_746), __rev0_746, __p1_746); \ + __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \ + __ret_746; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__p0, (int8x16_t)__p1, 43); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = __a64_vcvtq_low_bf16_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("bf16"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { + 
uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t 
vreinterpretq_bf16_f16(float16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vdotq_laneq_u32(__p0_747, __p1_747, __p2_747, __p3_747) __extension__ ({ \ + uint32x4_t __ret_747; \ + uint32x4_t __s0_747 = __p0_747; \ + uint8x16_t __s1_747 = __p1_747; \ + uint8x16_t __s2_747 = __p2_747; \ +uint8x16_t __reint_747 = __s2_747; \ +uint32x4_t __reint1_747 = splatq_laneq_u32(*(uint32x4_t *) &__reint_747, __p3_747); \ + __ret_747 = vdotq_u32(__s0_747, __s1_747, *(uint8x16_t *) &__reint1_747); \ + __ret_747; \ +}) +#else +#define vdotq_laneq_u32(__p0_748, __p1_748, __p2_748, __p3_748) __extension__ ({ \ + uint32x4_t __ret_748; \ + uint32x4_t __s0_748 = __p0_748; \ + uint8x16_t __s1_748 
= __p1_748; \ + uint8x16_t __s2_748 = __p2_748; \ + uint32x4_t __rev0_748; __rev0_748 = __builtin_shufflevector(__s0_748, __s0_748, 3, 2, 1, 0); \ + uint8x16_t __rev1_748; __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_748; __rev2_748 = __builtin_shufflevector(__s2_748, __s2_748, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_748 = __rev2_748; \ +uint32x4_t __reint1_748 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_748, __p3_748); \ + __ret_748 = __noswap_vdotq_u32(__rev0_748, __rev1_748, *(uint8x16_t *) &__reint1_748); \ + __ret_748 = __builtin_shufflevector(__ret_748, __ret_748, 3, 2, 1, 0); \ + __ret_748; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_laneq_s32(__p0_749, __p1_749, __p2_749, __p3_749) __extension__ ({ \ + int32x4_t __ret_749; \ + int32x4_t __s0_749 = __p0_749; \ + int8x16_t __s1_749 = __p1_749; \ + int8x16_t __s2_749 = __p2_749; \ +int8x16_t __reint_749 = __s2_749; \ +int32x4_t __reint1_749 = splatq_laneq_s32(*(int32x4_t *) &__reint_749, __p3_749); \ + __ret_749 = vdotq_s32(__s0_749, __s1_749, *(int8x16_t *) &__reint1_749); \ + __ret_749; \ +}) +#else +#define vdotq_laneq_s32(__p0_750, __p1_750, __p2_750, __p3_750) __extension__ ({ \ + int32x4_t __ret_750; \ + int32x4_t __s0_750 = __p0_750; \ + int8x16_t __s1_750 = __p1_750; \ + int8x16_t __s2_750 = __p2_750; \ + int32x4_t __rev0_750; __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 3, 2, 1, 0); \ + int8x16_t __rev1_750; __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_750; __rev2_750 = __builtin_shufflevector(__s2_750, __s2_750, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_750 = __rev2_750; \ +int32x4_t __reint1_750 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_750, __p3_750); \ + __ret_750 = __noswap_vdotq_s32(__rev0_750, __rev1_750, *(int8x16_t *) &__reint1_750); \ + __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 3, 2, 1, 0); \ + __ret_750; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_laneq_u32(__p0_751, __p1_751, __p2_751, __p3_751) __extension__ ({ \ + uint32x2_t __ret_751; \ + uint32x2_t __s0_751 = __p0_751; \ + uint8x8_t __s1_751 = __p1_751; \ + uint8x16_t __s2_751 = __p2_751; \ +uint8x16_t __reint_751 = __s2_751; \ +uint32x2_t __reint1_751 = splat_laneq_u32(*(uint32x4_t *) &__reint_751, __p3_751); \ + __ret_751 = vdot_u32(__s0_751, __s1_751, *(uint8x8_t *) &__reint1_751); \ + __ret_751; \ +}) +#else +#define vdot_laneq_u32(__p0_752, __p1_752, __p2_752, __p3_752) __extension__ ({ \ + uint32x2_t __ret_752; \ + uint32x2_t __s0_752 = __p0_752; \ + uint8x8_t __s1_752 = __p1_752; \ + uint8x16_t __s2_752 = __p2_752; \ + uint32x2_t __rev0_752; __rev0_752 = __builtin_shufflevector(__s0_752, __s0_752, 1, 0); \ + uint8x8_t __rev1_752; __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_752; __rev2_752 = __builtin_shufflevector(__s2_752, __s2_752, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_752 = __rev2_752; \ +uint32x2_t __reint1_752 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_752, __p3_752); \ + __ret_752 = __noswap_vdot_u32(__rev0_752, __rev1_752, *(uint8x8_t *) &__reint1_752); \ + __ret_752 = __builtin_shufflevector(__ret_752, __ret_752, 1, 0); \ + __ret_752; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_laneq_s32(__p0_753, __p1_753, 
__p2_753, __p3_753) __extension__ ({ \ + int32x2_t __ret_753; \ + int32x2_t __s0_753 = __p0_753; \ + int8x8_t __s1_753 = __p1_753; \ + int8x16_t __s2_753 = __p2_753; \ +int8x16_t __reint_753 = __s2_753; \ +int32x2_t __reint1_753 = splat_laneq_s32(*(int32x4_t *) &__reint_753, __p3_753); \ + __ret_753 = vdot_s32(__s0_753, __s1_753, *(int8x8_t *) &__reint1_753); \ + __ret_753; \ +}) +#else +#define vdot_laneq_s32(__p0_754, __p1_754, __p2_754, __p3_754) __extension__ ({ \ + int32x2_t __ret_754; \ + int32x2_t __s0_754 = __p0_754; \ + int8x8_t __s1_754 = __p1_754; \ + int8x16_t __s2_754 = __p2_754; \ + int32x2_t __rev0_754; __rev0_754 = __builtin_shufflevector(__s0_754, __s0_754, 1, 0); \ + int8x8_t __rev1_754; __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_754; __rev2_754 = __builtin_shufflevector(__s2_754, __s2_754, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_754 = __rev2_754; \ +int32x2_t __reint1_754 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_754, __p3_754); \ + __ret_754 = __noswap_vdot_s32(__rev0_754, __rev1_754, *(int8x8_t *) &__reint1_754); \ + __ret_754 = __builtin_shufflevector(__ret_754, __ret_754, 1, 0); \ + __ret_754; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) 
__builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t 
__ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = 
(float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t 
__s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ 
+ float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + 
float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_lane_f16(__p0_755, __p1_755, __p2_755, __p3_755) __extension__ ({ \ + float16_t __ret_755; \ + float16_t __s0_755 = __p0_755; \ + float16_t __s1_755 = __p1_755; \ + float16x4_t __s2_755 = __p2_755; \ + __ret_755 = vfmah_lane_f16(__s0_755, -__s1_755, __s2_755, __p3_755); \ + __ret_755; \ +}) +#else +#define vfmsh_lane_f16(__p0_756, __p1_756, __p2_756, __p3_756) __extension__ ({ \ + float16_t __ret_756; \ + float16_t __s0_756 = __p0_756; \ + float16_t __s1_756 = __p1_756; \ + float16x4_t __s2_756 = __p2_756; \ + float16x4_t __rev2_756; __rev2_756 = __builtin_shufflevector(__s2_756, __s2_756, 3, 2, 1, 0); \ + __ret_756 = __noswap_vfmah_lane_f16(__s0_756, -__s1_756, __rev2_756, __p3_756); \ + __ret_756; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f16(__p0_757, __p1_757, __p2_757, __p3_757) __extension__ ({ \ + float16x8_t __ret_757; \ + float16x8_t __s0_757 = __p0_757; \ + float16x8_t __s1_757 = __p1_757; \ + float16x4_t __s2_757 = __p2_757; \ + __ret_757 = vfmaq_lane_f16(__s0_757, -__s1_757, __s2_757, __p3_757); \ + __ret_757; \ +}) +#else +#define vfmsq_lane_f16(__p0_758, __p1_758, __p2_758, __p3_758) __extension__ ({ \ + float16x8_t __ret_758; \ + float16x8_t __s0_758 = __p0_758; \ + float16x8_t __s1_758 = __p1_758; \ + float16x4_t __s2_758 = __p2_758; \ + float16x8_t __rev0_758; __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_758; __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_758; __rev2_758 = __builtin_shufflevector(__s2_758, __s2_758, 3, 2, 1, 0); \ + __ret_758 = __noswap_vfmaq_lane_f16(__rev0_758, -__rev1_758, __rev2_758, __p3_758); \ + __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_758; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f16(__p0_759, __p1_759, __p2_759, __p3_759) __extension__ ({ \ + float16x4_t __ret_759; \ + float16x4_t __s0_759 = __p0_759; \ + float16x4_t __s1_759 = __p1_759; \ + float16x4_t __s2_759 = __p2_759; \ + __ret_759 = vfma_lane_f16(__s0_759, -__s1_759, __s2_759, __p3_759); \ + __ret_759; \ +}) +#else +#define vfms_lane_f16(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \ + float16x4_t __ret_760; \ + float16x4_t __s0_760 = __p0_760; \ + float16x4_t __s1_760 = __p1_760; \ + float16x4_t __s2_760 = __p2_760; \ + float16x4_t __rev0_760; __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 3, 2, 1, 0); \ + float16x4_t __rev1_760; __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \ + float16x4_t __rev2_760; __rev2_760 = __builtin_shufflevector(__s2_760, __s2_760, 3, 2, 1, 0); \ + __ret_760 = __noswap_vfma_lane_f16(__rev0_760, -__rev1_760, __rev2_760, __p3_760); \ + __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 3, 2, 1, 0); \ + __ret_760; \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_laneq_f16(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \ + float16_t __ret_761; \ + float16_t __s0_761 = __p0_761; \ + float16_t __s1_761 = __p1_761; \ + float16x8_t __s2_761 = __p2_761; \ + __ret_761 = vfmah_laneq_f16(__s0_761, -__s1_761, __s2_761, __p3_761); \ + __ret_761; \ +}) +#else +#define vfmsh_laneq_f16(__p0_762, __p1_762, __p2_762, __p3_762) __extension__ ({ \ + float16_t __ret_762; \ + float16_t __s0_762 = __p0_762; \ + float16_t __s1_762 = __p1_762; \ + float16x8_t __s2_762 = __p2_762; \ + float16x8_t __rev2_762; __rev2_762 = __builtin_shufflevector(__s2_762, __s2_762, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_762 = __noswap_vfmah_laneq_f16(__s0_762, -__s1_762, __rev2_762, __p3_762); \ + __ret_762; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \ + float16x8_t __ret_763; \ + float16x8_t __s0_763 = __p0_763; \ + float16x8_t __s1_763 = __p1_763; \ + float16x8_t __s2_763 = __p2_763; \ + __ret_763 = vfmaq_laneq_f16(__s0_763, -__s1_763, __s2_763, __p3_763); \ + __ret_763; \ +}) +#else +#define vfmsq_laneq_f16(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \ + float16x8_t __ret_764; \ + float16x8_t __s0_764 = __p0_764; \ + float16x8_t __s1_764 = __p1_764; \ + float16x8_t __s2_764 = __p2_764; \ + float16x8_t __rev0_764; __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_764; __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_764; __rev2_764 = __builtin_shufflevector(__s2_764, __s2_764, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_764 = __noswap_vfmaq_laneq_f16(__rev0_764, -__rev1_764, __rev2_764, __p3_764); \ + __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_764; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f16(__p0_765, __p1_765, __p2_765, __p3_765) __extension__ ({ \ + float16x4_t __ret_765; \ + float16x4_t __s0_765 = __p0_765; \ + float16x4_t __s1_765 = __p1_765; \ + float16x8_t __s2_765 = __p2_765; \ + __ret_765 = vfma_laneq_f16(__s0_765, -__s1_765, __s2_765, __p3_765); \ + __ret_765; \ +}) +#else +#define vfms_laneq_f16(__p0_766, __p1_766, __p2_766, __p3_766) __extension__ ({ \ + float16x4_t __ret_766; \ + float16x4_t __s0_766 = __p0_766; \ + float16x4_t __s1_766 = __p1_766; \ + float16x8_t __s2_766 = __p2_766; \ + float16x4_t __rev0_766; __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 3, 2, 1, 0); \ + float16x4_t __rev1_766; __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \ + float16x8_t __rev2_766; __rev2_766 = __builtin_shufflevector(__s2_766, __s2_766, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_766 = __noswap_vfma_laneq_f16(__rev0_766, -__rev1_766, __rev2_766, __p3_766); \ + __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 3, 2, 1, 0); \ + __ret_766; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t 
__rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + 
+#ifdef __LITTLE_ENDIAN__ +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vminv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f16(__p0_767, __p1_767, __p2_767) __extension__ ({ \ + float16x8_t __ret_767; \ + float16x8_t __s0_767 = __p0_767; \ + float16x8_t __s1_767 = __p1_767; \ + __ret_767 = __s0_767 * splatq_laneq_f16(__s1_767, __p2_767); \ + __ret_767; \ +}) +#else +#define vmulq_laneq_f16(__p0_768, __p1_768, __p2_768) __extension__ ({ \ + float16x8_t __ret_768; \ + float16x8_t __s0_768 = __p0_768; \ + float16x8_t __s1_768 = __p1_768; \ + float16x8_t __rev0_768; __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_768; __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_768 = __rev0_768 * __noswap_splatq_laneq_f16(__rev1_768, __p2_768); \ + __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_768; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f16(__p0_769, __p1_769, __p2_769) __extension__ ({ \ + float16x4_t __ret_769; \ + float16x4_t __s0_769 = __p0_769; \ + float16x8_t __s1_769 = __p1_769; \ + __ret_769 = __s0_769 * splat_laneq_f16(__s1_769, __p2_769); \ + __ret_769; \ +}) +#else +#define vmul_laneq_f16(__p0_770, __p1_770, __p2_770) __extension__ ({ \ + float16x4_t __ret_770; \ + float16x4_t __s0_770 = __p0_770; \ + float16x8_t __s1_770 = __p1_770; \ + float16x4_t __rev0_770; __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 3, 2, 1, 0); \ + float16x8_t __rev1_770; __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_770 = __rev0_770 * __noswap_splat_laneq_f16(__rev1_770, __p2_770); \ + __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \ + __ret_770; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t 
__p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16"))) float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16"))) float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f16(__p0_771, __p1_771, __p2_771) __extension__ ({ \ + float16x8_t __ret_771; \ + float16x8_t __s0_771 = __p0_771; \ + float16x4_t __s1_771 = __p1_771; \ + __ret_771 = vmulxq_f16(__s0_771, splatq_lane_f16(__s1_771, __p2_771)); \ + __ret_771; \ +}) +#else +#define vmulxq_lane_f16(__p0_772, __p1_772, __p2_772) __extension__ ({ \ + float16x8_t __ret_772; \ + float16x8_t __s0_772 = __p0_772; \ + float16x4_t __s1_772 = __p1_772; \ + float16x8_t __rev0_772; __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_772; __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 3, 2, 1, 0); \ + __ret_772 = __noswap_vmulxq_f16(__rev0_772, __noswap_splatq_lane_f16(__rev1_772, __p2_772)); \ + __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_772; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f16(__p0_773, __p1_773, __p2_773) __extension__ ({ \ + float16x4_t __ret_773; \ + float16x4_t __s0_773 = __p0_773; \ + float16x4_t __s1_773 = __p1_773; \ + __ret_773 = vmulx_f16(__s0_773, splat_lane_f16(__s1_773, __p2_773)); \ + __ret_773; \ +}) +#else +#define vmulx_lane_f16(__p0_774, __p1_774, __p2_774) __extension__ ({ \ + float16x4_t __ret_774; \ + float16x4_t __s0_774 = __p0_774; \ + 
float16x4_t __s1_774 = __p1_774; \ + float16x4_t __rev0_774; __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \ + float16x4_t __rev1_774; __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \ + __ret_774 = __noswap_vmulx_f16(__rev0_774, __noswap_splat_lane_f16(__rev1_774, __p2_774)); \ + __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \ + __ret_774; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f16(__p0_775, __p1_775, __p2_775) __extension__ ({ \ + float16x8_t __ret_775; \ + float16x8_t __s0_775 = __p0_775; \ + float16x8_t __s1_775 = __p1_775; \ + __ret_775 = vmulxq_f16(__s0_775, splatq_laneq_f16(__s1_775, __p2_775)); \ + __ret_775; \ +}) +#else +#define vmulxq_laneq_f16(__p0_776, __p1_776, __p2_776) __extension__ ({ \ + float16x8_t __ret_776; \ + float16x8_t __s0_776 = __p0_776; \ + float16x8_t __s1_776 = __p1_776; \ + float16x8_t __rev0_776; __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_776; __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_776 = __noswap_vmulxq_f16(__rev0_776, __noswap_splatq_laneq_f16(__rev1_776, __p2_776)); \ + __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_776; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f16(__p0_777, __p1_777, __p2_777) __extension__ ({ \ + float16x4_t __ret_777; \ + float16x4_t __s0_777 = __p0_777; \ + float16x8_t __s1_777 = __p1_777; \ + __ret_777 = vmulx_f16(__s0_777, splat_laneq_f16(__s1_777, __p2_777)); \ + __ret_777; \ +}) +#else +#define vmulx_laneq_f16(__p0_778, __p1_778, __p2_778) __extension__ ({ \ + float16x4_t __ret_778; \ + float16x4_t __s0_778 = __p0_778; \ + float16x8_t __s1_778 = __p1_778; \ + float16x4_t __rev0_778; __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \ + float16x8_t __rev1_778; __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_778 = __noswap_vmulx_f16(__rev0_778, __noswap_splat_laneq_f16(__rev1_778, __p2_778)); \ + __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 3, 2, 1, 0); \ + __ret_778; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) 
__builtin_neon_vpmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__rev0, 8); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + 
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 
11); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudotq_laneq_s32(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \ + int32x4_t __ret_779; \ + int32x4_t __s0_779 = __p0_779; \ + int8x16_t __s1_779 = __p1_779; \ + uint8x16_t __s2_779 = __p2_779; \ +uint8x16_t __reint_779 = __s2_779; \ + __ret_779 = vusdotq_s32(__s0_779, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_779, __p3_779)), __s1_779); \ + __ret_779; \ +}) +#else +#define vsudotq_laneq_s32(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \ + int32x4_t __ret_780; \ + int32x4_t __s0_780 = __p0_780; \ + int8x16_t __s1_780 = __p1_780; \ + uint8x16_t __s2_780 = __p2_780; \ + int32x4_t __rev0_780; __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \ + int8x16_t __rev1_780; __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_780; __rev2_780 = __builtin_shufflevector(__s2_780, 
__s2_780, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_780 = __rev2_780; \ + __ret_780 = __noswap_vusdotq_s32(__rev0_780, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_780, __p3_780)), __rev1_780); \ + __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 3, 2, 1, 0); \ + __ret_780; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudot_laneq_s32(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \ + int32x2_t __ret_781; \ + int32x2_t __s0_781 = __p0_781; \ + int8x8_t __s1_781 = __p1_781; \ + uint8x16_t __s2_781 = __p2_781; \ +uint8x16_t __reint_781 = __s2_781; \ + __ret_781 = vusdot_s32(__s0_781, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_781, __p3_781)), __s1_781); \ + __ret_781; \ +}) +#else +#define vsudot_laneq_s32(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \ + int32x2_t __ret_782; \ + int32x2_t __s0_782 = __p0_782; \ + int8x8_t __s1_782 = __p1_782; \ + uint8x16_t __s2_782 = __p2_782; \ + int32x2_t __rev0_782; __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \ + int8x8_t __rev1_782; __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_782; __rev2_782 = __builtin_shufflevector(__s2_782, __s2_782, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_782 = __rev2_782; \ + __ret_782 = __noswap_vusdot_s32(__rev0_782, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_782, __p3_782)), __rev1_782); \ + __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 1, 0); \ + __ret_782; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_laneq_s32(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \ + int32x4_t __ret_783; \ + int32x4_t __s0_783 = __p0_783; \ + uint8x16_t __s1_783 = __p1_783; \ + int8x16_t __s2_783 = __p2_783; \ +int8x16_t __reint_783 = __s2_783; \ + __ret_783 = vusdotq_s32(__s0_783, __s1_783, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_783, __p3_783))); \ + __ret_783; \ +}) +#else +#define vusdotq_laneq_s32(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \ + int32x4_t __ret_784; \ + int32x4_t __s0_784 = __p0_784; \ + uint8x16_t __s1_784 = __p1_784; \ + int8x16_t __s2_784 = __p2_784; \ + int32x4_t __rev0_784; __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 3, 2, 1, 0); \ + uint8x16_t __rev1_784; __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_784; __rev2_784 = __builtin_shufflevector(__s2_784, __s2_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_784 = __rev2_784; \ + __ret_784 = __noswap_vusdotq_s32(__rev0_784, __rev1_784, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_784, __p3_784))); \ + __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 3, 2, 1, 0); \ + __ret_784; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_laneq_s32(__p0_785, __p1_785, __p2_785, __p3_785) __extension__ ({ \ + int32x2_t __ret_785; \ + int32x2_t __s0_785 = __p0_785; \ + uint8x8_t __s1_785 = __p1_785; \ + int8x16_t __s2_785 = __p2_785; \ +int8x16_t __reint_785 = __s2_785; \ + __ret_785 = vusdot_s32(__s0_785, __s1_785, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_785, __p3_785))); \ + __ret_785; \ +}) +#else +#define vusdot_laneq_s32(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \ + int32x2_t __ret_786; \ + int32x2_t __s0_786 = __p0_786; \ + uint8x8_t __s1_786 = __p1_786; \ + int8x16_t __s2_786 = __p2_786; \ + int32x2_t 
__rev0_786; __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 1, 0); \ + uint8x8_t __rev1_786; __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_786; __rev2_786 = __builtin_shufflevector(__s2_786, __s2_786, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_786 = __rev2_786; \ + __ret_786 = __noswap_vusdot_s32(__rev0_786, __rev1_786, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_786, __p3_786))); \ + __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 1, 0); \ + __ret_786; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vbcaxq_u8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vbcaxq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vbcaxq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vbcaxq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vbcaxq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vbcaxq_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vbcaxq_u16((int8x16_t)__p0, 
(int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vbcaxq_u16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vbcaxq_s8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vbcaxq_s8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vbcaxq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vbcaxq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vbcaxq_s64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vbcaxq_s64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) 
__builtin_neon_vbcaxq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vbcaxq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_veor3q_u8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_veor3q_u8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_veor3q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_veor3q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_veor3q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_veor3q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t 
__p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_veor3q_u16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_veor3q_u16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_veor3q_s8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_veor3q_s8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_veor3q_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_veor3q_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_veor3q_s64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_veor3q_s64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) int16x8_t 
veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_veor3q_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_veor3q_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrax1q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vrax1q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512hq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512hq_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512h2q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512h2q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512su0q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t 
vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512su0q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512su1q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512su1q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vxarq_u64((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vxarq_u64((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + 
__ret = (uint32x4_t) __builtin_neon_vsm3partw2q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4eq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm4eq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.1a"))) int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_lane_s32(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \ + int32_t __ret_787; \ + int32_t __s0_787 = __p0_787; \ + int32_t __s1_787 = __p1_787; \ + int32x2_t __s2_787 = __p2_787; \ + __ret_787 = vqrdmlahs_s32(__s0_787, __s1_787, vget_lane_s32(__s2_787, __p3_787)); 
\ + __ret_787; \ +}) +#else +#define vqrdmlahs_lane_s32(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \ + int32_t __ret_788; \ + int32_t __s0_788 = __p0_788; \ + int32_t __s1_788 = __p1_788; \ + int32x2_t __s2_788 = __p2_788; \ + int32x2_t __rev2_788; __rev2_788 = __builtin_shufflevector(__s2_788, __s2_788, 1, 0); \ + __ret_788 = vqrdmlahs_s32(__s0_788, __s1_788, __noswap_vget_lane_s32(__rev2_788, __p3_788)); \ + __ret_788; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_lane_s16(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \ + int16_t __ret_789; \ + int16_t __s0_789 = __p0_789; \ + int16_t __s1_789 = __p1_789; \ + int16x4_t __s2_789 = __p2_789; \ + __ret_789 = vqrdmlahh_s16(__s0_789, __s1_789, vget_lane_s16(__s2_789, __p3_789)); \ + __ret_789; \ +}) +#else +#define vqrdmlahh_lane_s16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \ + int16_t __ret_790; \ + int16_t __s0_790 = __p0_790; \ + int16_t __s1_790 = __p1_790; \ + int16x4_t __s2_790 = __p2_790; \ + int16x4_t __rev2_790; __rev2_790 = __builtin_shufflevector(__s2_790, __s2_790, 3, 2, 1, 0); \ + __ret_790 = vqrdmlahh_s16(__s0_790, __s1_790, __noswap_vget_lane_s16(__rev2_790, __p3_790)); \ + __ret_790; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_laneq_s32(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \ + int32_t __ret_791; \ + int32_t __s0_791 = __p0_791; \ + int32_t __s1_791 = __p1_791; \ + int32x4_t __s2_791 = __p2_791; \ + __ret_791 = vqrdmlahs_s32(__s0_791, __s1_791, vgetq_lane_s32(__s2_791, __p3_791)); \ + __ret_791; \ +}) +#else +#define vqrdmlahs_laneq_s32(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \ + int32_t __ret_792; \ + int32_t __s0_792 = __p0_792; \ + int32_t __s1_792 = __p1_792; \ + int32x4_t __s2_792 = __p2_792; \ + int32x4_t __rev2_792; __rev2_792 = __builtin_shufflevector(__s2_792, __s2_792, 3, 2, 1, 0); \ + __ret_792 = vqrdmlahs_s32(__s0_792, __s1_792, __noswap_vgetq_lane_s32(__rev2_792, __p3_792)); \ + __ret_792; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_laneq_s16(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \ + int16_t __ret_793; \ + int16_t __s0_793 = __p0_793; \ + int16_t __s1_793 = __p1_793; \ + int16x8_t __s2_793 = __p2_793; \ + __ret_793 = vqrdmlahh_s16(__s0_793, __s1_793, vgetq_lane_s16(__s2_793, __p3_793)); \ + __ret_793; \ +}) +#else +#define vqrdmlahh_laneq_s16(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \ + int16_t __ret_794; \ + int16_t __s0_794 = __p0_794; \ + int16_t __s1_794 = __p1_794; \ + int16x8_t __s2_794 = __p2_794; \ + int16x8_t __rev2_794; __rev2_794 = __builtin_shufflevector(__s2_794, __s2_794, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_794 = vqrdmlahh_s16(__s0_794, __s1_794, __noswap_vgetq_lane_s16(__rev2_794, __p3_794)); \ + __ret_794; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s32(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \ + int32x4_t __ret_795; \ + int32x4_t __s0_795 = __p0_795; \ + int32x4_t __s1_795 = __p1_795; \ + int32x4_t __s2_795 = __p2_795; \ + __ret_795 = vqrdmlahq_s32(__s0_795, __s1_795, splatq_laneq_s32(__s2_795, __p3_795)); \ + __ret_795; \ +}) +#else +#define vqrdmlahq_laneq_s32(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \ + int32x4_t __ret_796; \ + int32x4_t __s0_796 = __p0_796; \ + int32x4_t __s1_796 = __p1_796; \ + int32x4_t __s2_796 = __p2_796; \ + int32x4_t __rev0_796; __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 3, 2, 1, 0); \ + int32x4_t __rev1_796; __rev1_796 = 
__builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \ + int32x4_t __rev2_796; __rev2_796 = __builtin_shufflevector(__s2_796, __s2_796, 3, 2, 1, 0); \ + __ret_796 = __noswap_vqrdmlahq_s32(__rev0_796, __rev1_796, __noswap_splatq_laneq_s32(__rev2_796, __p3_796)); \ + __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 3, 2, 1, 0); \ + __ret_796; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s16(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \ + int16x8_t __ret_797; \ + int16x8_t __s0_797 = __p0_797; \ + int16x8_t __s1_797 = __p1_797; \ + int16x8_t __s2_797 = __p2_797; \ + __ret_797 = vqrdmlahq_s16(__s0_797, __s1_797, splatq_laneq_s16(__s2_797, __p3_797)); \ + __ret_797; \ +}) +#else +#define vqrdmlahq_laneq_s16(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \ + int16x8_t __ret_798; \ + int16x8_t __s0_798 = __p0_798; \ + int16x8_t __s1_798 = __p1_798; \ + int16x8_t __s2_798 = __p2_798; \ + int16x8_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_798; __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_798 = __noswap_vqrdmlahq_s16(__rev0_798, __rev1_798, __noswap_splatq_laneq_s16(__rev2_798, __p3_798)); \ + __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_798; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s32(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \ + int32x2_t __ret_799; \ + int32x2_t __s0_799 = __p0_799; \ + int32x2_t __s1_799 = __p1_799; \ + int32x4_t __s2_799 = __p2_799; \ + __ret_799 = vqrdmlah_s32(__s0_799, __s1_799, splat_laneq_s32(__s2_799, __p3_799)); \ + __ret_799; \ +}) +#else +#define vqrdmlah_laneq_s32(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \ + int32x2_t __ret_800; \ + int32x2_t __s0_800 = __p0_800; \ + int32x2_t __s1_800 = __p1_800; \ + int32x4_t __s2_800 = __p2_800; \ + int32x2_t __rev0_800; __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \ + int32x2_t __rev1_800; __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \ + int32x4_t __rev2_800; __rev2_800 = __builtin_shufflevector(__s2_800, __s2_800, 3, 2, 1, 0); \ + __ret_800 = __noswap_vqrdmlah_s32(__rev0_800, __rev1_800, __noswap_splat_laneq_s32(__rev2_800, __p3_800)); \ + __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 1, 0); \ + __ret_800; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s16(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \ + int16x4_t __ret_801; \ + int16x4_t __s0_801 = __p0_801; \ + int16x4_t __s1_801 = __p1_801; \ + int16x8_t __s2_801 = __p2_801; \ + __ret_801 = vqrdmlah_s16(__s0_801, __s1_801, splat_laneq_s16(__s2_801, __p3_801)); \ + __ret_801; \ +}) +#else +#define vqrdmlah_laneq_s16(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \ + int16x4_t __ret_802; \ + int16x4_t __s0_802 = __p0_802; \ + int16x4_t __s1_802 = __p1_802; \ + int16x8_t __s2_802 = __p2_802; \ + int16x4_t __rev0_802; __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \ + int16x4_t __rev1_802; __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 3, 2, 1, 0); \ + int16x8_t __rev2_802; __rev2_802 = __builtin_shufflevector(__s2_802, __s2_802, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_802 = __noswap_vqrdmlah_s16(__rev0_802, __rev1_802, __noswap_splat_laneq_s16(__rev2_802, __p3_802)); \ + 
__ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 3, 2, 1, 0); \ + __ret_802; \ +}) +#endif + +__ai __attribute__((target("v8.1a"))) int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2); + return __ret; +} +__ai __attribute__((target("v8.1a"))) int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshs_lane_s32(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \ + int32_t __ret_803; \ + int32_t __s0_803 = __p0_803; \ + int32_t __s1_803 = __p1_803; \ + int32x2_t __s2_803 = __p2_803; \ + __ret_803 = vqrdmlshs_s32(__s0_803, __s1_803, vget_lane_s32(__s2_803, __p3_803)); \ + __ret_803; \ +}) +#else +#define vqrdmlshs_lane_s32(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \ + int32_t __ret_804; \ + int32_t __s0_804 = __p0_804; \ + int32_t __s1_804 = __p1_804; \ + int32x2_t __s2_804 = __p2_804; \ + int32x2_t __rev2_804; __rev2_804 = __builtin_shufflevector(__s2_804, __s2_804, 1, 0); \ + __ret_804 = vqrdmlshs_s32(__s0_804, __s1_804, __noswap_vget_lane_s32(__rev2_804, __p3_804)); \ + __ret_804; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_lane_s16(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \ + int16_t __ret_805; \ + int16_t __s0_805 = __p0_805; \ + int16_t __s1_805 = __p1_805; \ + int16x4_t __s2_805 = __p2_805; \ + __ret_805 = vqrdmlshh_s16(__s0_805, __s1_805, vget_lane_s16(__s2_805, __p3_805)); \ + __ret_805; \ +}) +#else +#define vqrdmlshh_lane_s16(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \ + int16_t __ret_806; \ + int16_t __s0_806 = __p0_806; \ + int16_t __s1_806 = __p1_806; \ + int16x4_t __s2_806 = __p2_806; \ + int16x4_t __rev2_806; __rev2_806 = __builtin_shufflevector(__s2_806, __s2_806, 3, 2, 1, 0); \ + __ret_806 = vqrdmlshh_s16(__s0_806, __s1_806, __noswap_vget_lane_s16(__rev2_806, __p3_806)); \ + __ret_806; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshs_laneq_s32(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \ + int32_t __ret_807; \ + int32_t __s0_807 = __p0_807; \ + int32_t __s1_807 = __p1_807; \ + int32x4_t __s2_807 = __p2_807; \ + __ret_807 = vqrdmlshs_s32(__s0_807, __s1_807, vgetq_lane_s32(__s2_807, __p3_807)); \ + __ret_807; \ +}) +#else +#define vqrdmlshs_laneq_s32(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \ + int32_t __ret_808; \ + int32_t __s0_808 = __p0_808; \ + int32_t __s1_808 = __p1_808; \ + int32x4_t __s2_808 = __p2_808; \ + int32x4_t __rev2_808; __rev2_808 = __builtin_shufflevector(__s2_808, __s2_808, 3, 2, 1, 0); \ + __ret_808 = vqrdmlshs_s32(__s0_808, __s1_808, __noswap_vgetq_lane_s32(__rev2_808, __p3_808)); \ + __ret_808; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_laneq_s16(__p0_809, __p1_809, __p2_809, __p3_809) __extension__ ({ \ + int16_t __ret_809; \ + int16_t __s0_809 = __p0_809; \ + int16_t __s1_809 = __p1_809; \ + int16x8_t __s2_809 = __p2_809; \ + __ret_809 = vqrdmlshh_s16(__s0_809, __s1_809, vgetq_lane_s16(__s2_809, __p3_809)); \ + __ret_809; \ +}) +#else +#define vqrdmlshh_laneq_s16(__p0_810, __p1_810, __p2_810, __p3_810) __extension__ ({ \ + int16_t __ret_810; \ + int16_t __s0_810 = __p0_810; \ + int16_t __s1_810 = __p1_810; \ + int16x8_t __s2_810 = __p2_810; \ + int16x8_t __rev2_810; __rev2_810 = __builtin_shufflevector(__s2_810, __s2_810, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_810 = 
vqrdmlshh_s16(__s0_810, __s1_810, __noswap_vgetq_lane_s16(__rev2_810, __p3_810)); \ + __ret_810; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s32(__p0_811, __p1_811, __p2_811, __p3_811) __extension__ ({ \ + int32x4_t __ret_811; \ + int32x4_t __s0_811 = __p0_811; \ + int32x4_t __s1_811 = __p1_811; \ + int32x4_t __s2_811 = __p2_811; \ + __ret_811 = vqrdmlshq_s32(__s0_811, __s1_811, splatq_laneq_s32(__s2_811, __p3_811)); \ + __ret_811; \ +}) +#else +#define vqrdmlshq_laneq_s32(__p0_812, __p1_812, __p2_812, __p3_812) __extension__ ({ \ + int32x4_t __ret_812; \ + int32x4_t __s0_812 = __p0_812; \ + int32x4_t __s1_812 = __p1_812; \ + int32x4_t __s2_812 = __p2_812; \ + int32x4_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \ + int32x4_t __rev1_812; __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 3, 2, 1, 0); \ + int32x4_t __rev2_812; __rev2_812 = __builtin_shufflevector(__s2_812, __s2_812, 3, 2, 1, 0); \ + __ret_812 = __noswap_vqrdmlshq_s32(__rev0_812, __rev1_812, __noswap_splatq_laneq_s32(__rev2_812, __p3_812)); \ + __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 3, 2, 1, 0); \ + __ret_812; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s16(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \ + int16x8_t __ret_813; \ + int16x8_t __s0_813 = __p0_813; \ + int16x8_t __s1_813 = __p1_813; \ + int16x8_t __s2_813 = __p2_813; \ + __ret_813 = vqrdmlshq_s16(__s0_813, __s1_813, splatq_laneq_s16(__s2_813, __p3_813)); \ + __ret_813; \ +}) +#else +#define vqrdmlshq_laneq_s16(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \ + int16x8_t __ret_814; \ + int16x8_t __s0_814 = __p0_814; \ + int16x8_t __s1_814 = __p1_814; \ + int16x8_t __s2_814 = __p2_814; \ + int16x8_t __rev0_814; __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_814; __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_814; __rev2_814 = __builtin_shufflevector(__s2_814, __s2_814, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_814 = __noswap_vqrdmlshq_s16(__rev0_814, __rev1_814, __noswap_splatq_laneq_s16(__rev2_814, __p3_814)); \ + __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_814; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s32(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \ + int32x2_t __ret_815; \ + int32x2_t __s0_815 = __p0_815; \ + int32x2_t __s1_815 = __p1_815; \ + int32x4_t __s2_815 = __p2_815; \ + __ret_815 = vqrdmlsh_s32(__s0_815, __s1_815, splat_laneq_s32(__s2_815, __p3_815)); \ + __ret_815; \ +}) +#else +#define vqrdmlsh_laneq_s32(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \ + int32x2_t __ret_816; \ + int32x2_t __s0_816 = __p0_816; \ + int32x2_t __s1_816 = __p1_816; \ + int32x4_t __s2_816 = __p2_816; \ + int32x2_t __rev0_816; __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \ + int32x2_t __rev1_816; __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 1, 0); \ + int32x4_t __rev2_816; __rev2_816 = __builtin_shufflevector(__s2_816, __s2_816, 3, 2, 1, 0); \ + __ret_816 = __noswap_vqrdmlsh_s32(__rev0_816, __rev1_816, __noswap_splat_laneq_s32(__rev2_816, __p3_816)); \ + __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 1, 0); \ + __ret_816; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s16(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \ + int16x4_t __ret_817; \ + int16x4_t __s0_817 = __p0_817; 
\ + int16x4_t __s1_817 = __p1_817; \ + int16x8_t __s2_817 = __p2_817; \ + __ret_817 = vqrdmlsh_s16(__s0_817, __s1_817, splat_laneq_s16(__s2_817, __p3_817)); \ + __ret_817; \ +}) +#else +#define vqrdmlsh_laneq_s16(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \ + int16x4_t __ret_818; \ + int16x4_t __s0_818 = __p0_818; \ + int16x4_t __s1_818 = __p1_818; \ + int16x8_t __s2_818 = __p2_818; \ + int16x4_t __rev0_818; __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 3, 2, 1, 0); \ + int16x4_t __rev1_818; __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 3, 2, 1, 0); \ + int16x8_t __rev2_818; __rev2_818 = __builtin_shufflevector(__s2_818, __s2_818, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_818 = __noswap_vqrdmlsh_s16(__rev0_818, __rev1_818, __noswap_splat_laneq_s16(__rev2_818, __p3_818)); \ + __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \ + __ret_818; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_f64((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_f64((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a"))) 
float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_lane_f64(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \ + float64x1_t __ret_819; \ + float64x1_t __s0_819 = __p0_819; \ + float64x1_t __s1_819 = __p1_819; \ + float64x1_t __s2_819 = __p2_819; \ +float64x1_t __reint_819 = __s2_819; \ +uint64x2_t __reint1_819 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819), vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819)}; \ + __ret_819 = vcmla_f64(__s0_819, __s1_819, *(float64x1_t *) &__reint1_819); \ + __ret_819; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f64(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \ + float64x2_t __ret_820; \ + float64x2_t __s0_820 = __p0_820; \ + float64x2_t __s1_820 = __p1_820; \ + float64x1_t __s2_820 = __p2_820; \ +float64x1_t __reint_820 = __s2_820; \ +uint64x2_t __reint1_820 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_820, __p3_820), vgetq_lane_u64(*(uint64x2_t *) &__reint_820, __p3_820)}; \ + __ret_820 = vcmlaq_f64(__s0_820, __s1_820, *(float64x2_t *) &__reint1_820); \ + __ret_820; \ +}) +#else +#define vcmlaq_lane_f64(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \ + float64x2_t __ret_821; \ + float64x2_t __s0_821 = __p0_821; \ + float64x2_t __s1_821 = __p1_821; \ + float64x1_t __s2_821 = __p2_821; \ + float64x2_t __rev0_821; __rev0_821 = __builtin_shufflevector(__s0_821, __s0_821, 1, 0); \ + float64x2_t __rev1_821; __rev1_821 = __builtin_shufflevector(__s1_821, __s1_821, 1, 0); \ +float64x1_t __reint_821 = __s2_821; \ +uint64x2_t __reint1_821 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_821, __p3_821), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_821, __p3_821)}; \ + __ret_821 = __noswap_vcmlaq_f64(__rev0_821, __rev1_821, *(float64x2_t *) &__reint1_821); \ + __ret_821 = __builtin_shufflevector(__ret_821, __ret_821, 1, 0); \ + __ret_821; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f64(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \ + float64x1_t __ret_822; \ + float64x1_t __s0_822 = __p0_822; \ + float64x1_t __s1_822 = __p1_822; \ + float64x2_t __s2_822 = __p2_822; \ +float64x2_t __reint_822 = __s2_822; \ +uint64x2_t __reint1_822 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_822, __p3_822), vgetq_lane_u64(*(uint64x2_t *) &__reint_822, __p3_822)}; \ + __ret_822 = vcmla_f64(__s0_822, __s1_822, *(float64x1_t *) &__reint1_822); \ + __ret_822; \ +}) +#else +#define vcmla_laneq_f64(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \ + float64x1_t __ret_823; \ + float64x1_t __s0_823 = __p0_823; \ + float64x1_t __s1_823 = __p1_823; \ + float64x2_t __s2_823 = __p2_823; \ + float64x2_t __rev2_823; __rev2_823 = __builtin_shufflevector(__s2_823, __s2_823, 1, 0); \ +float64x2_t __reint_823 = __rev2_823; \ +uint64x2_t __reint1_823 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_823, __p3_823), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_823, __p3_823)}; \ + __ret_823 = vcmla_f64(__s0_823, __s1_823, *(float64x1_t *) &__reint1_823); \ + __ret_823; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f64(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \ + float64x2_t __ret_824; \ + float64x2_t __s0_824 = __p0_824; \ + float64x2_t __s1_824 = __p1_824; \ + float64x2_t __s2_824 = __p2_824; \ +float64x2_t __reint_824 = 
__s2_824; \ +uint64x2_t __reint1_824 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_824, __p3_824), vgetq_lane_u64(*(uint64x2_t *) &__reint_824, __p3_824)}; \ + __ret_824 = vcmlaq_f64(__s0_824, __s1_824, *(float64x2_t *) &__reint1_824); \ + __ret_824; \ +}) +#else +#define vcmlaq_laneq_f64(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \ + float64x2_t __ret_825; \ + float64x2_t __s0_825 = __p0_825; \ + float64x2_t __s1_825 = __p1_825; \ + float64x2_t __s2_825 = __p2_825; \ + float64x2_t __rev0_825; __rev0_825 = __builtin_shufflevector(__s0_825, __s0_825, 1, 0); \ + float64x2_t __rev1_825; __rev1_825 = __builtin_shufflevector(__s1_825, __s1_825, 1, 0); \ + float64x2_t __rev2_825; __rev2_825 = __builtin_shufflevector(__s2_825, __s2_825, 1, 0); \ +float64x2_t __reint_825 = __rev2_825; \ +uint64x2_t __reint1_825 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_825, __p3_825), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_825, __p3_825)}; \ + __ret_825 = __noswap_vcmlaq_f64(__rev0_825, __rev1_825, *(float64x2_t *) &__reint1_825); \ + __ret_825 = __builtin_shufflevector(__ret_825, __ret_825, 1, 0); \ + __ret_825; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_rot180_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot180_lane_f64(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \ + float64x1_t __ret_826; \ + float64x1_t __s0_826 = __p0_826; \ + float64x1_t __s1_826 = __p1_826; \ + float64x1_t __s2_826 = __p2_826; \ +float64x1_t __reint_826 = __s2_826; \ +uint64x2_t __reint1_826 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_826, __p3_826), vgetq_lane_u64(*(uint64x2_t *) &__reint_826, __p3_826)}; \ + __ret_826 = vcmla_rot180_f64(__s0_826, __s1_826, *(float64x1_t *) &__reint1_826); \ + __ret_826; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f64(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \ + float64x2_t __ret_827; \ + float64x2_t __s0_827 = __p0_827; \ + float64x2_t __s1_827 = __p1_827; \ + float64x1_t __s2_827 = __p2_827; \ +float64x1_t __reint_827 = __s2_827; \ +uint64x2_t __reint1_827 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_827, __p3_827), 
vgetq_lane_u64(*(uint64x2_t *) &__reint_827, __p3_827)}; \ + __ret_827 = vcmlaq_rot180_f64(__s0_827, __s1_827, *(float64x2_t *) &__reint1_827); \ + __ret_827; \ +}) +#else +#define vcmlaq_rot180_lane_f64(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \ + float64x2_t __ret_828; \ + float64x2_t __s0_828 = __p0_828; \ + float64x2_t __s1_828 = __p1_828; \ + float64x1_t __s2_828 = __p2_828; \ + float64x2_t __rev0_828; __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 1, 0); \ + float64x2_t __rev1_828; __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 1, 0); \ +float64x1_t __reint_828 = __s2_828; \ +uint64x2_t __reint1_828 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_828, __p3_828), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_828, __p3_828)}; \ + __ret_828 = __noswap_vcmlaq_rot180_f64(__rev0_828, __rev1_828, *(float64x2_t *) &__reint1_828); \ + __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 1, 0); \ + __ret_828; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f64(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \ + float64x1_t __ret_829; \ + float64x1_t __s0_829 = __p0_829; \ + float64x1_t __s1_829 = __p1_829; \ + float64x2_t __s2_829 = __p2_829; \ +float64x2_t __reint_829 = __s2_829; \ +uint64x2_t __reint1_829 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_829, __p3_829), vgetq_lane_u64(*(uint64x2_t *) &__reint_829, __p3_829)}; \ + __ret_829 = vcmla_rot180_f64(__s0_829, __s1_829, *(float64x1_t *) &__reint1_829); \ + __ret_829; \ +}) +#else +#define vcmla_rot180_laneq_f64(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \ + float64x1_t __ret_830; \ + float64x1_t __s0_830 = __p0_830; \ + float64x1_t __s1_830 = __p1_830; \ + float64x2_t __s2_830 = __p2_830; \ + float64x2_t __rev2_830; __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 1, 0); \ +float64x2_t __reint_830 = __rev2_830; \ +uint64x2_t __reint1_830 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_830, __p3_830), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_830, __p3_830)}; \ + __ret_830 = vcmla_rot180_f64(__s0_830, __s1_830, *(float64x1_t *) &__reint1_830); \ + __ret_830; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f64(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \ + float64x2_t __ret_831; \ + float64x2_t __s0_831 = __p0_831; \ + float64x2_t __s1_831 = __p1_831; \ + float64x2_t __s2_831 = __p2_831; \ +float64x2_t __reint_831 = __s2_831; \ +uint64x2_t __reint1_831 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_831, __p3_831), vgetq_lane_u64(*(uint64x2_t *) &__reint_831, __p3_831)}; \ + __ret_831 = vcmlaq_rot180_f64(__s0_831, __s1_831, *(float64x2_t *) &__reint1_831); \ + __ret_831; \ +}) +#else +#define vcmlaq_rot180_laneq_f64(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \ + float64x2_t __ret_832; \ + float64x2_t __s0_832 = __p0_832; \ + float64x2_t __s1_832 = __p1_832; \ + float64x2_t __s2_832 = __p2_832; \ + float64x2_t __rev0_832; __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 1, 0); \ + float64x2_t __rev1_832; __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 1, 0); \ + float64x2_t __rev2_832; __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 1, 0); \ +float64x2_t __reint_832 = __rev2_832; \ +uint64x2_t __reint1_832 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_832, __p3_832), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_832, __p3_832)}; \ + __ret_832 = __noswap_vcmlaq_rot180_f64(__rev0_832, 
__rev1_832, *(float64x2_t *) &__reint1_832); \ + __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 1, 0); \ + __ret_832; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_rot270_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot270_lane_f64(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \ + float64x1_t __ret_833; \ + float64x1_t __s0_833 = __p0_833; \ + float64x1_t __s1_833 = __p1_833; \ + float64x1_t __s2_833 = __p2_833; \ +float64x1_t __reint_833 = __s2_833; \ +uint64x2_t __reint1_833 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_833, __p3_833), vgetq_lane_u64(*(uint64x2_t *) &__reint_833, __p3_833)}; \ + __ret_833 = vcmla_rot270_f64(__s0_833, __s1_833, *(float64x1_t *) &__reint1_833); \ + __ret_833; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f64(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \ + float64x2_t __ret_834; \ + float64x2_t __s0_834 = __p0_834; \ + float64x2_t __s1_834 = __p1_834; \ + float64x1_t __s2_834 = __p2_834; \ +float64x1_t __reint_834 = __s2_834; \ +uint64x2_t __reint1_834 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_834, __p3_834), vgetq_lane_u64(*(uint64x2_t *) &__reint_834, __p3_834)}; \ + __ret_834 = vcmlaq_rot270_f64(__s0_834, __s1_834, *(float64x2_t *) &__reint1_834); \ + __ret_834; \ +}) +#else +#define vcmlaq_rot270_lane_f64(__p0_835, __p1_835, __p2_835, __p3_835) __extension__ ({ \ + float64x2_t __ret_835; \ + float64x2_t __s0_835 = __p0_835; \ + float64x2_t __s1_835 = __p1_835; \ + float64x1_t __s2_835 = __p2_835; \ + float64x2_t __rev0_835; __rev0_835 = __builtin_shufflevector(__s0_835, __s0_835, 1, 0); \ + float64x2_t __rev1_835; __rev1_835 = __builtin_shufflevector(__s1_835, __s1_835, 1, 0); \ +float64x1_t __reint_835 = __s2_835; \ +uint64x2_t __reint1_835 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_835, __p3_835), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_835, __p3_835)}; \ + __ret_835 = __noswap_vcmlaq_rot270_f64(__rev0_835, __rev1_835, *(float64x2_t *) &__reint1_835); \ + __ret_835 = __builtin_shufflevector(__ret_835, __ret_835, 1, 0); \ + __ret_835; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vcmla_rot270_laneq_f64(__p0_836, __p1_836, __p2_836, __p3_836) __extension__ ({ \ + float64x1_t __ret_836; \ + float64x1_t __s0_836 = __p0_836; \ + float64x1_t __s1_836 = __p1_836; \ + float64x2_t __s2_836 = __p2_836; \ +float64x2_t __reint_836 = __s2_836; \ +uint64x2_t __reint1_836 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_836, __p3_836), vgetq_lane_u64(*(uint64x2_t *) &__reint_836, __p3_836)}; \ + __ret_836 = vcmla_rot270_f64(__s0_836, __s1_836, *(float64x1_t *) &__reint1_836); \ + __ret_836; \ +}) +#else +#define vcmla_rot270_laneq_f64(__p0_837, __p1_837, __p2_837, __p3_837) __extension__ ({ \ + float64x1_t __ret_837; \ + float64x1_t __s0_837 = __p0_837; \ + float64x1_t __s1_837 = __p1_837; \ + float64x2_t __s2_837 = __p2_837; \ + float64x2_t __rev2_837; __rev2_837 = __builtin_shufflevector(__s2_837, __s2_837, 1, 0); \ +float64x2_t __reint_837 = __rev2_837; \ +uint64x2_t __reint1_837 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_837, __p3_837), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_837, __p3_837)}; \ + __ret_837 = vcmla_rot270_f64(__s0_837, __s1_837, *(float64x1_t *) &__reint1_837); \ + __ret_837; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f64(__p0_838, __p1_838, __p2_838, __p3_838) __extension__ ({ \ + float64x2_t __ret_838; \ + float64x2_t __s0_838 = __p0_838; \ + float64x2_t __s1_838 = __p1_838; \ + float64x2_t __s2_838 = __p2_838; \ +float64x2_t __reint_838 = __s2_838; \ +uint64x2_t __reint1_838 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_838, __p3_838), vgetq_lane_u64(*(uint64x2_t *) &__reint_838, __p3_838)}; \ + __ret_838 = vcmlaq_rot270_f64(__s0_838, __s1_838, *(float64x2_t *) &__reint1_838); \ + __ret_838; \ +}) +#else +#define vcmlaq_rot270_laneq_f64(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \ + float64x2_t __ret_839; \ + float64x2_t __s0_839 = __p0_839; \ + float64x2_t __s1_839 = __p1_839; \ + float64x2_t __s2_839 = __p2_839; \ + float64x2_t __rev0_839; __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 1, 0); \ + float64x2_t __rev1_839; __rev1_839 = __builtin_shufflevector(__s1_839, __s1_839, 1, 0); \ + float64x2_t __rev2_839; __rev2_839 = __builtin_shufflevector(__s2_839, __s2_839, 1, 0); \ +float64x2_t __reint_839 = __rev2_839; \ +uint64x2_t __reint1_839 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_839, __p3_839), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_839, __p3_839)}; \ + __ret_839 = __noswap_vcmlaq_rot270_f64(__rev0_839, __rev1_839, *(float64x2_t *) &__reint1_839); \ + __ret_839 = __builtin_shufflevector(__ret_839, __ret_839, 1, 0); \ + __ret_839; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai 
__attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_rot90_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot90_lane_f64(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \ + float64x1_t __ret_840; \ + float64x1_t __s0_840 = __p0_840; \ + float64x1_t __s1_840 = __p1_840; \ + float64x1_t __s2_840 = __p2_840; \ +float64x1_t __reint_840 = __s2_840; \ +uint64x2_t __reint1_840 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_840, __p3_840), vgetq_lane_u64(*(uint64x2_t *) &__reint_840, __p3_840)}; \ + __ret_840 = vcmla_rot90_f64(__s0_840, __s1_840, *(float64x1_t *) &__reint1_840); \ + __ret_840; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f64(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \ + float64x2_t __ret_841; \ + float64x2_t __s0_841 = __p0_841; \ + float64x2_t __s1_841 = __p1_841; \ + float64x1_t __s2_841 = __p2_841; \ +float64x1_t __reint_841 = __s2_841; \ +uint64x2_t __reint1_841 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_841, __p3_841), vgetq_lane_u64(*(uint64x2_t *) &__reint_841, __p3_841)}; \ + __ret_841 = vcmlaq_rot90_f64(__s0_841, __s1_841, *(float64x2_t *) &__reint1_841); \ + __ret_841; \ +}) +#else +#define vcmlaq_rot90_lane_f64(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \ + float64x2_t __ret_842; \ + float64x2_t __s0_842 = __p0_842; \ + float64x2_t __s1_842 = __p1_842; \ + float64x1_t __s2_842 = __p2_842; \ + float64x2_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \ + float64x2_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \ +float64x1_t __reint_842 = __s2_842; \ +uint64x2_t __reint1_842 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_842, __p3_842), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_842, __p3_842)}; \ + __ret_842 = __noswap_vcmlaq_rot90_f64(__rev0_842, __rev1_842, *(float64x2_t *) &__reint1_842); \ + __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \ + __ret_842; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f64(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ + float64x1_t __ret_843; \ + float64x1_t __s0_843 = __p0_843; \ + float64x1_t __s1_843 = __p1_843; \ + float64x2_t __s2_843 = __p2_843; \ +float64x2_t __reint_843 = __s2_843; \ +uint64x2_t __reint1_843 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_843, __p3_843), vgetq_lane_u64(*(uint64x2_t *) &__reint_843, __p3_843)}; \ + __ret_843 = vcmla_rot90_f64(__s0_843, __s1_843, *(float64x1_t *) &__reint1_843); \ + __ret_843; \ +}) +#else +#define vcmla_rot90_laneq_f64(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ + float64x1_t __ret_844; \ + float64x1_t __s0_844 = __p0_844; \ + float64x1_t __s1_844 = __p1_844; \ + float64x2_t __s2_844 = __p2_844; \ + float64x2_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 1, 0); \ +float64x2_t __reint_844 = __rev2_844; \ +uint64x2_t __reint1_844 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_844, __p3_844), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_844, 
__p3_844)}; \ + __ret_844 = vcmla_rot90_f64(__s0_844, __s1_844, *(float64x1_t *) &__reint1_844); \ + __ret_844; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_laneq_f64(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ + float64x2_t __ret_845; \ + float64x2_t __s0_845 = __p0_845; \ + float64x2_t __s1_845 = __p1_845; \ + float64x2_t __s2_845 = __p2_845; \ +float64x2_t __reint_845 = __s2_845; \ +uint64x2_t __reint1_845 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_845, __p3_845), vgetq_lane_u64(*(uint64x2_t *) &__reint_845, __p3_845)}; \ + __ret_845 = vcmlaq_rot90_f64(__s0_845, __s1_845, *(float64x2_t *) &__reint1_845); \ + __ret_845; \ +}) +#else +#define vcmlaq_rot90_laneq_f64(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ + float64x2_t __ret_846; \ + float64x2_t __s0_846 = __p0_846; \ + float64x2_t __s1_846 = __p1_846; \ + float64x2_t __s2_846 = __p2_846; \ + float64x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ + float64x2_t __rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \ + float64x2_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \ +float64x2_t __reint_846 = __rev2_846; \ +uint64x2_t __reint1_846 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_846, __p3_846), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_846, __p3_846)}; \ + __ret_846 = __noswap_vcmlaq_rot90_f64(__rev0_846, __rev1_846, *(float64x2_t *) &__reint1_846); \ + __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ + __ret_846; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd32zq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd32zq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__p0, 9); + return __ret; +} 
+#else +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrnd_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai 
float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrnda_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrndi_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrndm_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrndn_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndpq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndpq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vrndp_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t 
vrndx_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); + return __ret; +} +#endif +#if defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { @@ -67444,60 +67282,60 @@ __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_f16(__p0_851, __p1_851) __extension__ ({ \ - float16_t __ret_851; \ - float16x4_t __s0_851 = __p0_851; \ -float16x4_t __reint_851 = __s0_851; \ -int16_t __reint1_851 = vget_lane_s16(*(int16x4_t *) &__reint_851, __p1_851); \ - __ret_851 = *(float16_t *) &__reint1_851; \ - __ret_851; \ +#define vget_lane_f16(__p0_847, __p1_847) __extension__ ({ \ + float16_t __ret_847; \ + float16x4_t __s0_847 = __p0_847; \ +float16x4_t __reint_847 = __s0_847; \ +int16_t __reint1_847 = vget_lane_s16(*(int16x4_t *) &__reint_847, __p1_847); \ + __ret_847 = *(float16_t *) &__reint1_847; \ + __ret_847; \ }) #else -#define vget_lane_f16(__p0_852, __p1_852) __extension__ ({ \ - float16_t __ret_852; \ - float16x4_t __s0_852 = __p0_852; \ - float16x4_t __rev0_852; __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \ -float16x4_t __reint_852 = __rev0_852; \ -int16_t __reint1_852 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_852, __p1_852); \ - __ret_852 = *(float16_t *) &__reint1_852; \ - __ret_852; \ +#define vget_lane_f16(__p0_848, __p1_848) __extension__ ({ \ + float16_t __ret_848; \ + float16x4_t __s0_848 = __p0_848; \ + float16x4_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 0); \ +float16x4_t __reint_848 = __rev0_848; \ +int16_t __reint1_848 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_848, __p1_848); \ + __ret_848 = *(float16_t *) &__reint1_848; \ + __ret_848; \ }) 
-#define __noswap_vget_lane_f16(__p0_853, __p1_853) __extension__ ({ \ - float16_t __ret_853; \ - float16x4_t __s0_853 = __p0_853; \ -float16x4_t __reint_853 = __s0_853; \ -int16_t __reint1_853 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_853, __p1_853); \ - __ret_853 = *(float16_t *) &__reint1_853; \ - __ret_853; \ +#define __noswap_vget_lane_f16(__p0_849, __p1_849) __extension__ ({ \ + float16_t __ret_849; \ + float16x4_t __s0_849 = __p0_849; \ +float16x4_t __reint_849 = __s0_849; \ +int16_t __reint1_849 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_849, __p1_849); \ + __ret_849 = *(float16_t *) &__reint1_849; \ + __ret_849; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_f16(__p0_854, __p1_854) __extension__ ({ \ - float16_t __ret_854; \ - float16x8_t __s0_854 = __p0_854; \ -float16x8_t __reint_854 = __s0_854; \ -int16_t __reint1_854 = vgetq_lane_s16(*(int16x8_t *) &__reint_854, __p1_854); \ - __ret_854 = *(float16_t *) &__reint1_854; \ - __ret_854; \ +#define vgetq_lane_f16(__p0_850, __p1_850) __extension__ ({ \ + float16_t __ret_850; \ + float16x8_t __s0_850 = __p0_850; \ +float16x8_t __reint_850 = __s0_850; \ +int16_t __reint1_850 = vgetq_lane_s16(*(int16x8_t *) &__reint_850, __p1_850); \ + __ret_850 = *(float16_t *) &__reint1_850; \ + __ret_850; \ }) #else -#define vgetq_lane_f16(__p0_855, __p1_855) __extension__ ({ \ - float16_t __ret_855; \ - float16x8_t __s0_855 = __p0_855; \ - float16x8_t __rev0_855; __rev0_855 = __builtin_shufflevector(__s0_855, __s0_855, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_855 = __rev0_855; \ -int16_t __reint1_855 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_855, __p1_855); \ - __ret_855 = *(float16_t *) &__reint1_855; \ - __ret_855; \ +#define vgetq_lane_f16(__p0_851, __p1_851) __extension__ ({ \ + float16_t __ret_851; \ + float16x8_t __s0_851 = __p0_851; \ + float16x8_t __rev0_851; __rev0_851 = __builtin_shufflevector(__s0_851, __s0_851, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_851 = __rev0_851; \ +int16_t __reint1_851 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_851, __p1_851); \ + __ret_851 = *(float16_t *) &__reint1_851; \ + __ret_851; \ }) -#define __noswap_vgetq_lane_f16(__p0_856, __p1_856) __extension__ ({ \ - float16_t __ret_856; \ - float16x8_t __s0_856 = __p0_856; \ -float16x8_t __reint_856 = __s0_856; \ -int16_t __reint1_856 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_856, __p1_856); \ - __ret_856 = *(float16_t *) &__reint1_856; \ - __ret_856; \ +#define __noswap_vgetq_lane_f16(__p0_852, __p1_852) __extension__ ({ \ + float16_t __ret_852; \ + float16x8_t __s0_852 = __p0_852; \ +float16x8_t __reint_852 = __s0_852; \ +int16_t __reint1_852 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_852, __p1_852); \ + __ret_852 = *(float16_t *) &__reint1_852; \ + __ret_852; \ }) #endif @@ -67640,101 +67478,101 @@ __ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_u32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \ - uint64x2_t __ret_857; \ - uint64x2_t __s0_857 = __p0_857; \ - uint32x2_t __s1_857 = __p1_857; \ - uint32x2_t __s2_857 = __p2_857; \ - __ret_857 = __s0_857 + vmull_u32(__s1_857, splat_lane_u32(__s2_857, __p3_857)); \ +#define vmlal_lane_u32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \ + uint64x2_t __ret_853; \ + uint64x2_t __s0_853 = __p0_853; \ + uint32x2_t __s1_853 = __p1_853; \ + uint32x2_t __s2_853 = __p2_853; \ + __ret_853 = __s0_853 + vmull_u32(__s1_853, splat_lane_u32(__s2_853, 
__p3_853)); \ + __ret_853; \ +}) +#else +#define vmlal_lane_u32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \ + uint64x2_t __ret_854; \ + uint64x2_t __s0_854 = __p0_854; \ + uint32x2_t __s1_854 = __p1_854; \ + uint32x2_t __s2_854 = __p2_854; \ + uint64x2_t __rev0_854; __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \ + uint32x2_t __rev1_854; __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \ + uint32x2_t __rev2_854; __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \ + __ret_854 = __rev0_854 + __noswap_vmull_u32(__rev1_854, __noswap_splat_lane_u32(__rev2_854, __p3_854)); \ + __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \ + __ret_854; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_u16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \ + uint32x4_t __ret_855; \ + uint32x4_t __s0_855 = __p0_855; \ + uint16x4_t __s1_855 = __p1_855; \ + uint16x4_t __s2_855 = __p2_855; \ + __ret_855 = __s0_855 + vmull_u16(__s1_855, splat_lane_u16(__s2_855, __p3_855)); \ + __ret_855; \ +}) +#else +#define vmlal_lane_u16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \ + uint32x4_t __ret_856; \ + uint32x4_t __s0_856 = __p0_856; \ + uint16x4_t __s1_856 = __p1_856; \ + uint16x4_t __s2_856 = __p2_856; \ + uint32x4_t __rev0_856; __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \ + uint16x4_t __rev1_856; __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \ + uint16x4_t __rev2_856; __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \ + __ret_856 = __rev0_856 + __noswap_vmull_u16(__rev1_856, __noswap_splat_lane_u16(__rev2_856, __p3_856)); \ + __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \ + __ret_856; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \ + int64x2_t __ret_857; \ + int64x2_t __s0_857 = __p0_857; \ + int32x2_t __s1_857 = __p1_857; \ + int32x2_t __s2_857 = __p2_857; \ + __ret_857 = __s0_857 + vmull_s32(__s1_857, splat_lane_s32(__s2_857, __p3_857)); \ __ret_857; \ }) #else -#define vmlal_lane_u32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \ - uint64x2_t __ret_858; \ - uint64x2_t __s0_858 = __p0_858; \ - uint32x2_t __s1_858 = __p1_858; \ - uint32x2_t __s2_858 = __p2_858; \ - uint64x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \ - uint32x2_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \ - uint32x2_t __rev2_858; __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \ - __ret_858 = __rev0_858 + __noswap_vmull_u32(__rev1_858, __noswap_splat_lane_u32(__rev2_858, __p3_858)); \ +#define vmlal_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \ + int64x2_t __ret_858; \ + int64x2_t __s0_858 = __p0_858; \ + int32x2_t __s1_858 = __p1_858; \ + int32x2_t __s2_858 = __p2_858; \ + int64x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \ + int32x2_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \ + int32x2_t __rev2_858; __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \ + __ret_858 = __rev0_858 + __noswap_vmull_s32(__rev1_858, __noswap_splat_lane_s32(__rev2_858, __p3_858)); \ __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \ __ret_858; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_u16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \ - uint32x4_t 
__ret_859; \ - uint32x4_t __s0_859 = __p0_859; \ - uint16x4_t __s1_859 = __p1_859; \ - uint16x4_t __s2_859 = __p2_859; \ - __ret_859 = __s0_859 + vmull_u16(__s1_859, splat_lane_u16(__s2_859, __p3_859)); \ +#define vmlal_lane_s16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \ + int32x4_t __ret_859; \ + int32x4_t __s0_859 = __p0_859; \ + int16x4_t __s1_859 = __p1_859; \ + int16x4_t __s2_859 = __p2_859; \ + __ret_859 = __s0_859 + vmull_s16(__s1_859, splat_lane_s16(__s2_859, __p3_859)); \ __ret_859; \ }) #else -#define vmlal_lane_u16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \ - uint32x4_t __ret_860; \ - uint32x4_t __s0_860 = __p0_860; \ - uint16x4_t __s1_860 = __p1_860; \ - uint16x4_t __s2_860 = __p2_860; \ - uint32x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \ - uint16x4_t __rev1_860; __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \ - uint16x4_t __rev2_860; __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \ - __ret_860 = __rev0_860 + __noswap_vmull_u16(__rev1_860, __noswap_splat_lane_u16(__rev2_860, __p3_860)); \ +#define vmlal_lane_s16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \ + int32x4_t __ret_860; \ + int32x4_t __s0_860 = __p0_860; \ + int16x4_t __s1_860 = __p1_860; \ + int16x4_t __s2_860 = __p2_860; \ + int32x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \ + int16x4_t __rev1_860; __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \ + int16x4_t __rev2_860; __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \ + __ret_860 = __rev0_860 + __noswap_vmull_s16(__rev1_860, __noswap_splat_lane_s16(__rev2_860, __p3_860)); \ __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 3, 2, 1, 0); \ __ret_860; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_s32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \ - int64x2_t __ret_861; \ - int64x2_t __s0_861 = __p0_861; \ - int32x2_t __s1_861 = __p1_861; \ - int32x2_t __s2_861 = __p2_861; \ - __ret_861 = __s0_861 + vmull_s32(__s1_861, splat_lane_s32(__s2_861, __p3_861)); \ - __ret_861; \ -}) -#else -#define vmlal_lane_s32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \ - int64x2_t __ret_862; \ - int64x2_t __s0_862 = __p0_862; \ - int32x2_t __s1_862 = __p1_862; \ - int32x2_t __s2_862 = __p2_862; \ - int64x2_t __rev0_862; __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \ - int32x2_t __rev1_862; __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \ - int32x2_t __rev2_862; __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \ - __ret_862 = __rev0_862 + __noswap_vmull_s32(__rev1_862, __noswap_splat_lane_s32(__rev2_862, __p3_862)); \ - __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \ - __ret_862; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_s16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \ - int32x4_t __ret_863; \ - int32x4_t __s0_863 = __p0_863; \ - int16x4_t __s1_863 = __p1_863; \ - int16x4_t __s2_863 = __p2_863; \ - __ret_863 = __s0_863 + vmull_s16(__s1_863, splat_lane_s16(__s2_863, __p3_863)); \ - __ret_863; \ -}) -#else -#define vmlal_lane_s16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \ - int32x4_t __ret_864; \ - int32x4_t __s0_864 = __p0_864; \ - int16x4_t __s1_864 = __p1_864; \ - int16x4_t __s2_864 = __p2_864; \ - int32x4_t __rev0_864; __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \ - int16x4_t 
__rev1_864; __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \ - int16x4_t __rev2_864; __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \ - __ret_864 = __rev0_864 + __noswap_vmull_s16(__rev1_864, __noswap_splat_lane_s16(__rev2_864, __p3_864)); \ - __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \ - __ret_864; \ -}) -#endif - #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; @@ -67962,101 +67800,101 @@ __ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_u32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ - uint64x2_t __ret_865; \ - uint64x2_t __s0_865 = __p0_865; \ - uint32x2_t __s1_865 = __p1_865; \ - uint32x2_t __s2_865 = __p2_865; \ - __ret_865 = __s0_865 - vmull_u32(__s1_865, splat_lane_u32(__s2_865, __p3_865)); \ +#define vmlsl_lane_u32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \ + uint64x2_t __ret_861; \ + uint64x2_t __s0_861 = __p0_861; \ + uint32x2_t __s1_861 = __p1_861; \ + uint32x2_t __s2_861 = __p2_861; \ + __ret_861 = __s0_861 - vmull_u32(__s1_861, splat_lane_u32(__s2_861, __p3_861)); \ + __ret_861; \ +}) +#else +#define vmlsl_lane_u32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \ + uint64x2_t __ret_862; \ + uint64x2_t __s0_862 = __p0_862; \ + uint32x2_t __s1_862 = __p1_862; \ + uint32x2_t __s2_862 = __p2_862; \ + uint64x2_t __rev0_862; __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \ + uint32x2_t __rev1_862; __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \ + uint32x2_t __rev2_862; __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \ + __ret_862 = __rev0_862 - __noswap_vmull_u32(__rev1_862, __noswap_splat_lane_u32(__rev2_862, __p3_862)); \ + __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \ + __ret_862; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_u16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \ + uint32x4_t __ret_863; \ + uint32x4_t __s0_863 = __p0_863; \ + uint16x4_t __s1_863 = __p1_863; \ + uint16x4_t __s2_863 = __p2_863; \ + __ret_863 = __s0_863 - vmull_u16(__s1_863, splat_lane_u16(__s2_863, __p3_863)); \ + __ret_863; \ +}) +#else +#define vmlsl_lane_u16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \ + uint32x4_t __ret_864; \ + uint32x4_t __s0_864 = __p0_864; \ + uint16x4_t __s1_864 = __p1_864; \ + uint16x4_t __s2_864 = __p2_864; \ + uint32x4_t __rev0_864; __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \ + uint16x4_t __rev1_864; __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \ + uint16x4_t __rev2_864; __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \ + __ret_864 = __rev0_864 - __noswap_vmull_u16(__rev1_864, __noswap_splat_lane_u16(__rev2_864, __p3_864)); \ + __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \ + __ret_864; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_s32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ + int64x2_t __ret_865; \ + int64x2_t __s0_865 = __p0_865; \ + int32x2_t __s1_865 = __p1_865; \ + int32x2_t __s2_865 = __p2_865; \ + __ret_865 = __s0_865 - vmull_s32(__s1_865, splat_lane_s32(__s2_865, __p3_865)); \ __ret_865; \ }) #else -#define vmlsl_lane_u32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ - uint64x2_t __ret_866; \ - uint64x2_t __s0_866 = __p0_866; \ - uint32x2_t 
__s1_866 = __p1_866; \ - uint32x2_t __s2_866 = __p2_866; \ - uint64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \ - uint32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \ - uint32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \ - __ret_866 = __rev0_866 - __noswap_vmull_u32(__rev1_866, __noswap_splat_lane_u32(__rev2_866, __p3_866)); \ +#define vmlsl_lane_s32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ + int64x2_t __ret_866; \ + int64x2_t __s0_866 = __p0_866; \ + int32x2_t __s1_866 = __p1_866; \ + int32x2_t __s2_866 = __p2_866; \ + int64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \ + int32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \ + int32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \ + __ret_866 = __rev0_866 - __noswap_vmull_s32(__rev1_866, __noswap_splat_lane_s32(__rev2_866, __p3_866)); \ __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \ __ret_866; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_u16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ - uint32x4_t __ret_867; \ - uint32x4_t __s0_867 = __p0_867; \ - uint16x4_t __s1_867 = __p1_867; \ - uint16x4_t __s2_867 = __p2_867; \ - __ret_867 = __s0_867 - vmull_u16(__s1_867, splat_lane_u16(__s2_867, __p3_867)); \ +#define vmlsl_lane_s16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ + int32x4_t __ret_867; \ + int32x4_t __s0_867 = __p0_867; \ + int16x4_t __s1_867 = __p1_867; \ + int16x4_t __s2_867 = __p2_867; \ + __ret_867 = __s0_867 - vmull_s16(__s1_867, splat_lane_s16(__s2_867, __p3_867)); \ __ret_867; \ }) #else -#define vmlsl_lane_u16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ - uint32x4_t __ret_868; \ - uint32x4_t __s0_868 = __p0_868; \ - uint16x4_t __s1_868 = __p1_868; \ - uint16x4_t __s2_868 = __p2_868; \ - uint32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ - uint16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \ - uint16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \ - __ret_868 = __rev0_868 - __noswap_vmull_u16(__rev1_868, __noswap_splat_lane_u16(__rev2_868, __p3_868)); \ +#define vmlsl_lane_s16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ + int32x4_t __ret_868; \ + int32x4_t __s0_868 = __p0_868; \ + int16x4_t __s1_868 = __p1_868; \ + int16x4_t __s2_868 = __p2_868; \ + int32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ + int16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \ + int16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \ + __ret_868 = __rev0_868 - __noswap_vmull_s16(__rev1_868, __noswap_splat_lane_s16(__rev2_868, __p3_868)); \ __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \ __ret_868; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_s32(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \ - int64x2_t __ret_869; \ - int64x2_t __s0_869 = __p0_869; \ - int32x2_t __s1_869 = __p1_869; \ - int32x2_t __s2_869 = __p2_869; \ - __ret_869 = __s0_869 - vmull_s32(__s1_869, splat_lane_s32(__s2_869, __p3_869)); \ - __ret_869; \ -}) -#else -#define vmlsl_lane_s32(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \ - int64x2_t __ret_870; \ - 
int64x2_t __s0_870 = __p0_870; \ - int32x2_t __s1_870 = __p1_870; \ - int32x2_t __s2_870 = __p2_870; \ - int64x2_t __rev0_870; __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 1, 0); \ - int32x2_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 1, 0); \ - int32x2_t __rev2_870; __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 1, 0); \ - __ret_870 = __rev0_870 - __noswap_vmull_s32(__rev1_870, __noswap_splat_lane_s32(__rev2_870, __p3_870)); \ - __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 1, 0); \ - __ret_870; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_s16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \ - int32x4_t __ret_871; \ - int32x4_t __s0_871 = __p0_871; \ - int16x4_t __s1_871 = __p1_871; \ - int16x4_t __s2_871 = __p2_871; \ - __ret_871 = __s0_871 - vmull_s16(__s1_871, splat_lane_s16(__s2_871, __p3_871)); \ - __ret_871; \ -}) -#else -#define vmlsl_lane_s16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \ - int32x4_t __ret_872; \ - int32x4_t __s0_872 = __p0_872; \ - int16x4_t __s1_872 = __p1_872; \ - int16x4_t __s2_872 = __p2_872; \ - int32x4_t __rev0_872; __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 3, 2, 1, 0); \ - int16x4_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \ - int16x4_t __rev2_872; __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \ - __ret_872 = __rev0_872 - __noswap_vmull_s16(__rev1_872, __noswap_splat_lane_s16(__rev2_872, __p3_872)); \ - __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 3, 2, 1, 0); \ - __ret_872; \ -}) -#endif - #ifdef __LITTLE_ENDIAN__ __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; @@ -68146,69 +67984,116 @@ __ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -#define vset_lane_f16(__p0_873, __p1_873, __p2_873) __extension__ ({ \ - float16x4_t __ret_873; \ - float16_t __s0_873 = __p0_873; \ - float16x4_t __s1_873 = __p1_873; \ -float16_t __reint_873 = __s0_873; \ -float16x4_t __reint1_873 = __s1_873; \ -int16x4_t __reint2_873 = vset_lane_s16(*(int16_t *) &__reint_873, *(int16x4_t *) &__reint1_873, __p2_873); \ - __ret_873 = *(float16x4_t *) &__reint2_873; \ +#define vset_lane_f16(__p0_869, __p1_869, __p2_869) __extension__ ({ \ + float16x4_t __ret_869; \ + float16_t __s0_869 = __p0_869; \ + float16x4_t __s1_869 = __p1_869; \ +float16_t __reint_869 = __s0_869; \ +float16x4_t __reint1_869 = __s1_869; \ +int16x4_t __reint2_869 = vset_lane_s16(*(int16_t *) &__reint_869, *(int16x4_t *) &__reint1_869, __p2_869); \ + __ret_869 = *(float16x4_t *) &__reint2_869; \ + __ret_869; \ +}) +#else +#define vset_lane_f16(__p0_870, __p1_870, __p2_870) __extension__ ({ \ + float16x4_t __ret_870; \ + float16_t __s0_870 = __p0_870; \ + float16x4_t __s1_870 = __p1_870; \ + float16x4_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 3, 2, 1, 0); \ +float16_t __reint_870 = __s0_870; \ +float16x4_t __reint1_870 = __rev1_870; \ +int16x4_t __reint2_870 = __noswap_vset_lane_s16(*(int16_t *) &__reint_870, *(int16x4_t *) &__reint1_870, __p2_870); \ + __ret_870 = *(float16x4_t *) &__reint2_870; \ + __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \ + __ret_870; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f16(__p0_871, __p1_871, __p2_871) __extension__ ({ \ + float16x8_t __ret_871; \ + float16_t __s0_871 = __p0_871; \ + float16x8_t __s1_871 = 
__p1_871; \ +float16_t __reint_871 = __s0_871; \ +float16x8_t __reint1_871 = __s1_871; \ +int16x8_t __reint2_871 = vsetq_lane_s16(*(int16_t *) &__reint_871, *(int16x8_t *) &__reint1_871, __p2_871); \ + __ret_871 = *(float16x8_t *) &__reint2_871; \ + __ret_871; \ +}) +#else +#define vsetq_lane_f16(__p0_872, __p1_872, __p2_872) __extension__ ({ \ + float16x8_t __ret_872; \ + float16_t __s0_872 = __p0_872; \ + float16x8_t __s1_872 = __p1_872; \ + float16x8_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16_t __reint_872 = __s0_872; \ +float16x8_t __reint1_872 = __rev1_872; \ +int16x8_t __reint2_872 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_872, *(int16x8_t *) &__reint1_872, __p2_872); \ + __ret_872 = *(float16x8_t *) &__reint2_872; \ + __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_872; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlalbq_lane_f32(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \ + float32x4_t __ret_873; \ + float32x4_t __s0_873 = __p0_873; \ + bfloat16x8_t __s1_873 = __p1_873; \ + bfloat16x4_t __s2_873 = __p2_873; \ + __ret_873 = vbfmlalbq_f32(__s0_873, __s1_873, (bfloat16x8_t) {vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873)}); \ __ret_873; \ }) #else -#define vset_lane_f16(__p0_874, __p1_874, __p2_874) __extension__ ({ \ - float16x4_t __ret_874; \ - float16_t __s0_874 = __p0_874; \ - float16x4_t __s1_874 = __p1_874; \ - float16x4_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 3, 2, 1, 0); \ -float16_t __reint_874 = __s0_874; \ -float16x4_t __reint1_874 = __rev1_874; \ -int16x4_t __reint2_874 = __noswap_vset_lane_s16(*(int16_t *) &__reint_874, *(int16x4_t *) &__reint1_874, __p2_874); \ - __ret_874 = *(float16x4_t *) &__reint2_874; \ +#define vbfmlalbq_lane_f32(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \ + float32x4_t __ret_874; \ + float32x4_t __s0_874 = __p0_874; \ + bfloat16x8_t __s1_874 = __p1_874; \ + bfloat16x4_t __s2_874 = __p2_874; \ + float32x4_t __rev0_874; __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_874; __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \ + __ret_874 = __noswap_vbfmlalbq_f32(__rev0_874, __rev1_874, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874)}); \ __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \ __ret_874; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_f16(__p0_875, __p1_875, __p2_875) __extension__ ({ \ - float16x8_t __ret_875; \ - float16_t __s0_875 = __p0_875; \ - float16x8_t __s1_875 = __p1_875; \ -float16_t __reint_875 = __s0_875; \ -float16x8_t __reint1_875 = __s1_875; \ -int16x8_t __reint2_875 = vsetq_lane_s16(*(int16_t *) &__reint_875, *(int16x8_t *) &__reint1_875, __p2_875); \ - __ret_875 = 
*(float16x8_t *) &__reint2_875; \ +#define vbfmlalbq_laneq_f32(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \ + float32x4_t __ret_875; \ + float32x4_t __s0_875 = __p0_875; \ + bfloat16x8_t __s1_875 = __p1_875; \ + bfloat16x8_t __s2_875 = __p2_875; \ + __ret_875 = vbfmlalbq_f32(__s0_875, __s1_875, (bfloat16x8_t) {vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875)}); \ __ret_875; \ }) #else -#define vsetq_lane_f16(__p0_876, __p1_876, __p2_876) __extension__ ({ \ - float16x8_t __ret_876; \ - float16_t __s0_876 = __p0_876; \ - float16x8_t __s1_876 = __p1_876; \ - float16x8_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16_t __reint_876 = __s0_876; \ -float16x8_t __reint1_876 = __rev1_876; \ -int16x8_t __reint2_876 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_876, *(int16x8_t *) &__reint1_876, __p2_876); \ - __ret_876 = *(float16x8_t *) &__reint2_876; \ - __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vbfmlalbq_laneq_f32(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \ + float32x4_t __ret_876; \ + float32x4_t __s0_876 = __p0_876; \ + bfloat16x8_t __s1_876 = __p1_876; \ + bfloat16x8_t __s2_876 = __p2_876; \ + float32x4_t __rev0_876; __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_876; __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_876 = __noswap_vbfmlalbq_f32(__rev0_876, __rev1_876, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876)}); \ + __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 3, 2, 1, 0); \ __ret_876; \ }) #endif -#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) #ifdef __LITTLE_ENDIAN__ -#define vbfmlalbq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \ +#define vbfmlaltq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \ float32x4_t __ret_877; \ float32x4_t __s0_877 = __p0_877; \ bfloat16x8_t __s1_877 = __p1_877; \ bfloat16x4_t __s2_877 = __p2_877; \ - __ret_877 = vbfmlalbq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \ + __ret_877 = vbfmlaltq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \ __ret_877; \ }) #else -#define vbfmlalbq_lane_f32(__p0_878, __p1_878, __p2_878, 
__p3_878) __extension__ ({ \ +#define vbfmlaltq_lane_f32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \ float32x4_t __ret_878; \ float32x4_t __s0_878 = __p0_878; \ bfloat16x8_t __s1_878 = __p1_878; \ @@ -68216,23 +68101,23 @@ int16x8_t __reint2_876 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_876, *(in float32x4_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \ bfloat16x8_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x4_t __rev2_878; __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 3, 2, 1, 0); \ - __ret_878 = __noswap_vbfmlalbq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \ + __ret_878 = __noswap_vbfmlaltq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \ __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \ __ret_878; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vbfmlalbq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \ +#define vbfmlaltq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \ float32x4_t __ret_879; \ float32x4_t __s0_879 = __p0_879; \ bfloat16x8_t __s1_879 = __p1_879; \ bfloat16x8_t __s2_879 = __p2_879; \ - __ret_879 = vbfmlalbq_f32(__s0_879, __s1_879, (bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879)}); \ + __ret_879 = vbfmlaltq_f32(__s0_879, __s1_879, (bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879)}); \ __ret_879; \ }) #else -#define vbfmlalbq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ +#define vbfmlaltq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ float32x4_t __ret_880; \ float32x4_t __s0_880 = __p0_880; \ bfloat16x8_t __s1_880 = __p1_880; \ @@ -68240,68 +68125,20 @@ int16x8_t __reint2_876 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_876, *(in float32x4_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \ bfloat16x8_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 7, 6, 5, 4, 3, 2, 1, 0); \ bfloat16x8_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_880 = __noswap_vbfmlalbq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), 
__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \ + __ret_880 = __noswap_vbfmlaltq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \ __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \ __ret_880; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vbfmlaltq_lane_f32(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \ - float32x4_t __ret_881; \ - float32x4_t __s0_881 = __p0_881; \ - bfloat16x8_t __s1_881 = __p1_881; \ - bfloat16x4_t __s2_881 = __p2_881; \ - __ret_881 = vbfmlaltq_f32(__s0_881, __s1_881, (bfloat16x8_t) {vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881), vget_lane_bf16(__s2_881, __p3_881)}); \ - __ret_881; \ -}) -#else -#define vbfmlaltq_lane_f32(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \ - float32x4_t __ret_882; \ - float32x4_t __s0_882 = __p0_882; \ - bfloat16x8_t __s1_882 = __p1_882; \ - bfloat16x4_t __s2_882 = __p2_882; \ - float32x4_t __rev0_882; __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_882; __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 3, 2, 1, 0); \ - __ret_882 = __noswap_vbfmlaltq_f32(__rev0_882, __rev1_882, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882), __noswap_vget_lane_bf16(__rev2_882, __p3_882)}); \ - __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \ - __ret_882; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vbfmlaltq_laneq_f32(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \ - float32x4_t __ret_883; \ - float32x4_t __s0_883 = __p0_883; \ - bfloat16x8_t __s1_883 = __p1_883; \ - bfloat16x8_t __s2_883 = __p2_883; \ - __ret_883 = vbfmlaltq_f32(__s0_883, __s1_883, (bfloat16x8_t) {vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883), vgetq_lane_bf16(__s2_883, __p3_883)}); \ - __ret_883; \ -}) -#else -#define vbfmlaltq_laneq_f32(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \ - float32x4_t __ret_884; \ - float32x4_t __s0_884 = __p0_884; \ - bfloat16x8_t __s1_884 = __p1_884; \ - bfloat16x8_t __s2_884 = __p2_884; \ - float32x4_t __rev0_884; __rev0_884 = 
__builtin_shufflevector(__s0_884, __s0_884, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_884; __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_884 = __noswap_vbfmlaltq_f32(__rev0_884, __rev1_884, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884), __noswap_vgetq_lane_bf16(__rev2_884, __p3_884)}); \ - __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 3, 2, 1, 0); \ - __ret_884; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; __ret = vcvt_f32_bf16(vget_high_bf16(__p0)); return __ret; } #else -__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0)); @@ -68311,13 +68148,13 @@ __ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; __ret = vcvt_f32_bf16(vget_low_bf16(__p0)); return __ret; } #else -__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { +__ai __attribute__((target("bf16"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { float32x4_t __ret; bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0)); @@ -68326,487 +68163,58 @@ __ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { } #endif -#endif -#if defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ -#define vfmlalq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ - float32x4_t __ret_885; \ - float32x4_t __s0_885 = __p0_885; \ - float16x8_t __s1_885 = __p1_885; \ - float16x4_t __s2_885 = __p2_885; \ - __ret_885 = vfmlalq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \ - __ret_885; \ +#define vsudotq_lane_s32(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \ + int32x4_t __ret_881; \ + int32x4_t __s0_881 = __p0_881; \ + int8x16_t __s1_881 = __p1_881; \ + uint8x8_t __s2_881 = __p2_881; \ +uint8x8_t __reint_881 = __s2_881; \ + __ret_881 = vusdotq_s32(__s0_881, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_881, __p3_881)), __s1_881); \ + __ret_881; \ }) #else -#define vfmlalq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ - float32x4_t __ret_886; \ - float32x4_t __s0_886 = __p0_886; \ - float16x8_t __s1_886 = __p1_886; \ - float16x4_t __s2_886 = __p2_886; \ - float32x4_t __rev0_886; __rev0_886 = 
__builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \ - float16x8_t __rev1_886; __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_886; __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \ - __ret_886 = __noswap_vfmlalq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \ - __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \ - __ret_886; \ +#define vsudotq_lane_s32(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \ + int32x4_t __ret_882; \ + int32x4_t __s0_882 = __p0_882; \ + int8x16_t __s1_882 = __p1_882; \ + uint8x8_t __s2_882 = __p2_882; \ + int32x4_t __rev0_882; __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \ + int8x16_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_882; __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_882 = __rev2_882; \ + __ret_882 = __noswap_vusdotq_s32(__rev0_882, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_882, __p3_882)), __rev1_882); \ + __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \ + __ret_882; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vfmlal_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \ - float32x2_t __ret_887; \ - float32x2_t __s0_887 = __p0_887; \ - float16x4_t __s1_887 = __p1_887; \ - float16x4_t __s2_887 = __p2_887; \ - __ret_887 = vfmlal_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \ - __ret_887; \ +#define vsudot_lane_s32(__p0_883, __p1_883, __p2_883, __p3_883) __extension__ ({ \ + int32x2_t __ret_883; \ + int32x2_t __s0_883 = __p0_883; \ + int8x8_t __s1_883 = __p1_883; \ + uint8x8_t __s2_883 = __p2_883; \ +uint8x8_t __reint_883 = __s2_883; \ + __ret_883 = vusdot_s32(__s0_883, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_883, __p3_883)), __s1_883); \ + __ret_883; \ }) #else -#define vfmlal_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ - float32x2_t __ret_888; \ - float32x2_t __s0_888 = __p0_888; \ - float16x4_t __s1_888 = __p1_888; \ - float16x4_t __s2_888 = __p2_888; \ - float32x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ - float16x4_t __rev1_888; __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \ - float16x4_t __rev2_888; __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \ - __ret_888 = __noswap_vfmlal_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \ - __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ - __ret_888; \ +#define vsudot_lane_s32(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \ + int32x2_t __ret_884; \ + int32x2_t __s0_884 = __p0_884; \ + int8x8_t __s1_884 = __p1_884; \ 
+ uint8x8_t __s2_884 = __p2_884; \ + int32x2_t __rev0_884; __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \ + int8x8_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_884; __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_884 = __rev2_884; \ + __ret_884 = __noswap_vusdot_s32(__rev0_884, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_884, __p3_884)), __rev1_884); \ + __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \ + __ret_884; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ - float32x4_t __ret_889; \ - float32x4_t __s0_889 = __p0_889; \ - float16x8_t __s1_889 = __p1_889; \ - float16x4_t __s2_889 = __p2_889; \ - __ret_889 = vfmlalq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \ - __ret_889; \ -}) -#else -#define vfmlalq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ - float32x4_t __ret_890; \ - float32x4_t __s0_890 = __p0_890; \ - float16x8_t __s1_890 = __p1_890; \ - float16x4_t __s2_890 = __p2_890; \ - float32x4_t __rev0_890; __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \ - float16x8_t __rev1_890; __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_890; __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \ - __ret_890 = __noswap_vfmlalq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \ - __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \ - __ret_890; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \ - float32x2_t __ret_891; \ - float32x2_t __s0_891 = __p0_891; \ - float16x4_t __s1_891 = __p1_891; \ - float16x4_t __s2_891 = __p2_891; \ - __ret_891 = vfmlal_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \ - __ret_891; \ -}) -#else -#define vfmlal_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \ - float32x2_t __ret_892; \ - float32x2_t __s0_892 = __p0_892; \ - float16x4_t __s1_892 = __p1_892; \ - float16x4_t __s2_892 = __p2_892; \ - float32x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ - float16x4_t __rev1_892; __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \ - float16x4_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \ - __ret_892 = __noswap_vfmlal_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), 
__noswap_vget_lane_f16(__rev2_892, __p3_892)}); \ - __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ - __ret_892; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ - float32x4_t __ret_893; \ - float32x4_t __s0_893 = __p0_893; \ - float16x8_t __s1_893 = __p1_893; \ - float16x8_t __s2_893 = __p2_893; \ - __ret_893 = vfmlalq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \ - __ret_893; \ -}) -#else -#define vfmlalq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \ - float32x4_t __ret_894; \ - float32x4_t __s0_894 = __p0_894; \ - float16x8_t __s1_894 = __p1_894; \ - float16x8_t __s2_894 = __p2_894; \ - float32x4_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \ - float16x8_t __rev1_894; __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_894 = __noswap_vfmlalq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \ - __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \ - __ret_894; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ - float32x2_t __ret_895; \ - float32x2_t __s0_895 = __p0_895; \ - float16x4_t __s1_895 = __p1_895; \ - float16x8_t __s2_895 = __p2_895; \ - __ret_895 = vfmlal_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \ - __ret_895; \ -}) -#else -#define vfmlal_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ - float32x2_t __ret_896; \ - float32x2_t __s0_896 = __p0_896; \ - float16x4_t __s1_896 = __p1_896; \ - float16x8_t __s2_896 = __p2_896; \ - float32x2_t __rev0_896; __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \ - float16x4_t __rev1_896; __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \ - float16x8_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_896 = __noswap_vfmlal_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \ - __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \ - __ret_896; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ - float32x4_t __ret_897; \ - float32x4_t __s0_897 = __p0_897; \ - float16x8_t __s1_897 = __p1_897; \ - float16x8_t __s2_897 = __p2_897; \ - __ret_897 = 
vfmlalq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \ - __ret_897; \ -}) -#else -#define vfmlalq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ - float32x4_t __ret_898; \ - float32x4_t __s0_898 = __p0_898; \ - float16x8_t __s1_898 = __p1_898; \ - float16x8_t __s2_898 = __p2_898; \ - float32x4_t __rev0_898; __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \ - float16x8_t __rev1_898; __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_898 = __noswap_vfmlalq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \ - __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \ - __ret_898; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \ - float32x2_t __ret_899; \ - float32x2_t __s0_899 = __p0_899; \ - float16x4_t __s1_899 = __p1_899; \ - float16x8_t __s2_899 = __p2_899; \ - __ret_899 = vfmlal_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \ - __ret_899; \ -}) -#else -#define vfmlal_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \ - float32x2_t __ret_900; \ - float32x2_t __s0_900 = __p0_900; \ - float16x4_t __s1_900 = __p1_900; \ - float16x8_t __s2_900 = __p2_900; \ - float32x2_t __rev0_900; __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \ - float16x4_t __rev1_900; __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \ - float16x8_t __rev2_900; __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_900 = __noswap_vfmlal_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \ - __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \ - __ret_900; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_lane_high_f16(__p0_901, __p1_901, __p2_901, __p3_901) __extension__ ({ \ - float32x4_t __ret_901; \ - float32x4_t __s0_901 = __p0_901; \ - float16x8_t __s1_901 = __p1_901; \ - float16x4_t __s2_901 = __p2_901; \ - __ret_901 = vfmlslq_high_f16(__s0_901, __s1_901, (float16x8_t) {vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901)}); \ - __ret_901; \ -}) -#else -#define vfmlslq_lane_high_f16(__p0_902, __p1_902, __p2_902, 
__p3_902) __extension__ ({ \ - float32x4_t __ret_902; \ - float32x4_t __s0_902 = __p0_902; \ - float16x8_t __s1_902 = __p1_902; \ - float16x4_t __s2_902 = __p2_902; \ - float32x4_t __rev0_902; __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, 3, 2, 1, 0); \ - float16x8_t __rev1_902; __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_902; __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, 3, 2, 1, 0); \ - __ret_902 = __noswap_vfmlslq_high_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902)}); \ - __ret_902 = __builtin_shufflevector(__ret_902, __ret_902, 3, 2, 1, 0); \ - __ret_902; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlsl_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \ - float32x2_t __ret_903; \ - float32x2_t __s0_903 = __p0_903; \ - float16x4_t __s1_903 = __p1_903; \ - float16x4_t __s2_903 = __p2_903; \ - __ret_903 = vfmlsl_high_f16(__s0_903, __s1_903, (float16x4_t) {vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903)}); \ - __ret_903; \ -}) -#else -#define vfmlsl_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \ - float32x2_t __ret_904; \ - float32x2_t __s0_904 = __p0_904; \ - float16x4_t __s1_904 = __p1_904; \ - float16x4_t __s2_904 = __p2_904; \ - float32x2_t __rev0_904; __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, 1, 0); \ - float16x4_t __rev1_904; __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 3, 2, 1, 0); \ - float16x4_t __rev2_904; __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, 3, 2, 1, 0); \ - __ret_904 = __noswap_vfmlsl_high_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904)}); \ - __ret_904 = __builtin_shufflevector(__ret_904, __ret_904, 1, 0); \ - __ret_904; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_lane_low_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ - float32x4_t __ret_905; \ - float32x4_t __s0_905 = __p0_905; \ - float16x8_t __s1_905 = __p1_905; \ - float16x4_t __s2_905 = __p2_905; \ - __ret_905 = vfmlslq_low_f16(__s0_905, __s1_905, (float16x8_t) {vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905)}); \ - __ret_905; \ -}) -#else -#define vfmlslq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ - float32x4_t __ret_906; \ - float32x4_t __s0_906 = __p0_906; \ - float16x8_t __s1_906 = __p1_906; \ - float16x4_t __s2_906 = __p2_906; \ - float32x4_t __rev0_906; __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \ - float16x8_t __rev1_906; __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_906; __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 3, 2, 1, 0); 
\ - __ret_906 = __noswap_vfmlslq_low_f16(__rev0_906, __rev1_906, (float16x8_t) {__noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906)}); \ - __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \ - __ret_906; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlsl_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ - float32x2_t __ret_907; \ - float32x2_t __s0_907 = __p0_907; \ - float16x4_t __s1_907 = __p1_907; \ - float16x4_t __s2_907 = __p2_907; \ - __ret_907 = vfmlsl_low_f16(__s0_907, __s1_907, (float16x4_t) {vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907)}); \ - __ret_907; \ -}) -#else -#define vfmlsl_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ - float32x2_t __ret_908; \ - float32x2_t __s0_908 = __p0_908; \ - float16x4_t __s1_908 = __p1_908; \ - float16x4_t __s2_908 = __p2_908; \ - float32x2_t __rev0_908; __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \ - float16x4_t __rev1_908; __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 3, 2, 1, 0); \ - float16x4_t __rev2_908; __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 3, 2, 1, 0); \ - __ret_908 = __noswap_vfmlsl_low_f16(__rev0_908, __rev1_908, (float16x4_t) {__noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908)}); \ - __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \ - __ret_908; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_laneq_high_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ - float32x4_t __ret_909; \ - float32x4_t __s0_909 = __p0_909; \ - float16x8_t __s1_909 = __p1_909; \ - float16x8_t __s2_909 = __p2_909; \ - __ret_909 = vfmlslq_high_f16(__s0_909, __s1_909, (float16x8_t) {vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909)}); \ - __ret_909; \ -}) -#else -#define vfmlslq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ - float32x4_t __ret_910; \ - float32x4_t __s0_910 = __p0_910; \ - float16x8_t __s1_910 = __p1_910; \ - float16x8_t __s2_910 = __p2_910; \ - float32x4_t __rev0_910; __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, 3, 2, 1, 0); \ - float16x8_t __rev1_910; __rev1_910 = __builtin_shufflevector(__s1_910, __s1_910, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_910; __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_910 = __noswap_vfmlslq_high_f16(__rev0_910, __rev1_910, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, 
__p3_910)}); \ - __ret_910 = __builtin_shufflevector(__ret_910, __ret_910, 3, 2, 1, 0); \ - __ret_910; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlsl_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ - float32x2_t __ret_911; \ - float32x2_t __s0_911 = __p0_911; \ - float16x4_t __s1_911 = __p1_911; \ - float16x8_t __s2_911 = __p2_911; \ - __ret_911 = vfmlsl_high_f16(__s0_911, __s1_911, (float16x4_t) {vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911)}); \ - __ret_911; \ -}) -#else -#define vfmlsl_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ - float32x2_t __ret_912; \ - float32x2_t __s0_912 = __p0_912; \ - float16x4_t __s1_912 = __p1_912; \ - float16x8_t __s2_912 = __p2_912; \ - float32x2_t __rev0_912; __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, 1, 0); \ - float16x4_t __rev1_912; __rev1_912 = __builtin_shufflevector(__s1_912, __s1_912, 3, 2, 1, 0); \ - float16x8_t __rev2_912; __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_912 = __noswap_vfmlsl_high_f16(__rev0_912, __rev1_912, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912)}); \ - __ret_912 = __builtin_shufflevector(__ret_912, __ret_912, 1, 0); \ - __ret_912; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_laneq_low_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \ - float32x4_t __ret_913; \ - float32x4_t __s0_913 = __p0_913; \ - float16x8_t __s1_913 = __p1_913; \ - float16x8_t __s2_913 = __p2_913; \ - __ret_913 = vfmlslq_low_f16(__s0_913, __s1_913, (float16x8_t) {vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913)}); \ - __ret_913; \ -}) -#else -#define vfmlslq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ - float32x4_t __ret_914; \ - float32x4_t __s0_914 = __p0_914; \ - float16x8_t __s1_914 = __p1_914; \ - float16x8_t __s2_914 = __p2_914; \ - float32x4_t __rev0_914; __rev0_914 = __builtin_shufflevector(__s0_914, __s0_914, 3, 2, 1, 0); \ - float16x8_t __rev1_914; __rev1_914 = __builtin_shufflevector(__s1_914, __s1_914, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_914; __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_914 = __noswap_vfmlslq_low_f16(__rev0_914, __rev1_914, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914)}); \ - __ret_914 = __builtin_shufflevector(__ret_914, __ret_914, 3, 2, 1, 0); \ - __ret_914; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlsl_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ - float32x2_t __ret_915; \ - float32x2_t __s0_915 = __p0_915; \ - float16x4_t __s1_915 = __p1_915; \ - float16x8_t __s2_915 = __p2_915; \ - __ret_915 = vfmlsl_low_f16(__s0_915, __s1_915, 
(float16x4_t) {vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915)}); \ - __ret_915; \ -}) -#else -#define vfmlsl_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ - float32x2_t __ret_916; \ - float32x2_t __s0_916 = __p0_916; \ - float16x4_t __s1_916 = __p1_916; \ - float16x8_t __s2_916 = __p2_916; \ - float32x2_t __rev0_916; __rev0_916 = __builtin_shufflevector(__s0_916, __s0_916, 1, 0); \ - float16x4_t __rev1_916; __rev1_916 = __builtin_shufflevector(__s1_916, __s1_916, 3, 2, 1, 0); \ - float16x8_t __rev2_916; __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_916 = __noswap_vfmlsl_low_f16(__rev0_916, __rev1_916, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916)}); \ - __ret_916 = __builtin_shufflevector(__ret_916, __ret_916, 1, 0); \ - __ret_916; \ -}) -#endif - -#endif -#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -#define vmulh_lane_f16(__p0_917, __p1_917, __p2_917) __extension__ ({ \ - float16_t __ret_917; \ - float16_t __s0_917 = __p0_917; \ - float16x4_t __s1_917 = __p1_917; \ - __ret_917 = __s0_917 * vget_lane_f16(__s1_917, __p2_917); \ - __ret_917; \ -}) -#else -#define vmulh_lane_f16(__p0_918, __p1_918, __p2_918) __extension__ ({ \ - float16_t __ret_918; \ - float16_t __s0_918 = __p0_918; \ - float16x4_t __s1_918 = __p1_918; \ - float16x4_t __rev1_918; __rev1_918 = __builtin_shufflevector(__s1_918, __s1_918, 3, 2, 1, 0); \ - __ret_918 = __s0_918 * __noswap_vget_lane_f16(__rev1_918, __p2_918); \ - __ret_918; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulh_laneq_f16(__p0_919, __p1_919, __p2_919) __extension__ ({ \ - float16_t __ret_919; \ - float16_t __s0_919 = __p0_919; \ - float16x8_t __s1_919 = __p1_919; \ - __ret_919 = __s0_919 * vgetq_lane_f16(__s1_919, __p2_919); \ - __ret_919; \ -}) -#else -#define vmulh_laneq_f16(__p0_920, __p1_920, __p2_920) __extension__ ({ \ - float16_t __ret_920; \ - float16_t __s0_920 = __p0_920; \ - float16x8_t __s1_920 = __p1_920; \ - float16x8_t __rev1_920; __rev1_920 = __builtin_shufflevector(__s1_920, __s1_920, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_920 = __s0_920 * __noswap_vgetq_lane_f16(__rev1_920, __p2_920); \ - __ret_920; \ -}) -#endif - -#endif -#if defined(__ARM_FEATURE_MATMUL_INT8) -#ifdef __LITTLE_ENDIAN__ -#define vsudotq_lane_s32(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ - int32x4_t __ret_921; \ - int32x4_t __s0_921 = __p0_921; \ - int8x16_t __s1_921 = __p1_921; \ - uint8x8_t __s2_921 = __p2_921; \ -uint8x8_t __reint_921 = __s2_921; \ - __ret_921 = vusdotq_s32(__s0_921, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_921, __p3_921)), __s1_921); \ - __ret_921; \ -}) -#else -#define vsudotq_lane_s32(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ - int32x4_t __ret_922; \ - int32x4_t __s0_922 = __p0_922; \ - int8x16_t __s1_922 = __p1_922; \ - uint8x8_t __s2_922 = __p2_922; \ - int32x4_t __rev0_922; __rev0_922 = __builtin_shufflevector(__s0_922, __s0_922, 3, 2, 1, 0); \ - int8x16_t __rev1_922; __rev1_922 = __builtin_shufflevector(__s1_922, __s1_922, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_922; __rev2_922 = __builtin_shufflevector(__s2_922, __s2_922, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_922 = __rev2_922; 
\ - __ret_922 = __noswap_vusdotq_s32(__rev0_922, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_922, __p3_922)), __rev1_922); \ - __ret_922 = __builtin_shufflevector(__ret_922, __ret_922, 3, 2, 1, 0); \ - __ret_922; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsudot_lane_s32(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ - int32x2_t __ret_923; \ - int32x2_t __s0_923 = __p0_923; \ - int8x8_t __s1_923 = __p1_923; \ - uint8x8_t __s2_923 = __p2_923; \ -uint8x8_t __reint_923 = __s2_923; \ - __ret_923 = vusdot_s32(__s0_923, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_923, __p3_923)), __s1_923); \ - __ret_923; \ -}) -#else -#define vsudot_lane_s32(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \ - int32x2_t __ret_924; \ - int32x2_t __s0_924 = __p0_924; \ - int8x8_t __s1_924 = __p1_924; \ - uint8x8_t __s2_924 = __p2_924; \ - int32x2_t __rev0_924; __rev0_924 = __builtin_shufflevector(__s0_924, __s0_924, 1, 0); \ - int8x8_t __rev1_924; __rev1_924 = __builtin_shufflevector(__s1_924, __s1_924, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_924; __rev2_924 = __builtin_shufflevector(__s2_924, __s2_924, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_924 = __rev2_924; \ - __ret_924 = __noswap_vusdot_s32(__rev0_924, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_924, __p3_924)), __rev1_924); \ - __ret_924 = __builtin_shufflevector(__ret_924, __ret_924, 1, 0); \ - __ret_924; \ -}) -#endif - -#endif #if defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { @@ -69115,136 +68523,136 @@ __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \ - poly64x2_t __ret_925; \ - poly64x2_t __s0_925 = __p0_925; \ - poly64x1_t __s2_925 = __p2_925; \ - __ret_925 = vsetq_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \ - __ret_925; \ +#define vcopyq_lane_p64(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ + poly64x2_t __ret_885; \ + poly64x2_t __s0_885 = __p0_885; \ + poly64x1_t __s2_885 = __p2_885; \ + __ret_885 = vsetq_lane_p64(vget_lane_p64(__s2_885, __p3_885), __s0_885, __p1_885); \ + __ret_885; \ }) #else -#define vcopyq_lane_p64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \ - poly64x2_t __ret_926; \ - poly64x2_t __s0_926 = __p0_926; \ - poly64x1_t __s2_926 = __p2_926; \ - poly64x2_t __rev0_926; __rev0_926 = __builtin_shufflevector(__s0_926, __s0_926, 1, 0); \ - __ret_926 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_926, __p3_926), __rev0_926, __p1_926); \ - __ret_926 = __builtin_shufflevector(__ret_926, __ret_926, 1, 0); \ - __ret_926; \ +#define vcopyq_lane_p64(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ + poly64x2_t __ret_886; \ + poly64x2_t __s0_886 = __p0_886; \ + poly64x1_t __s2_886 = __p2_886; \ + poly64x2_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 1, 0); \ + __ret_886 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_886, __p3_886), __rev0_886, __p1_886); \ + __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 1, 0); \ + __ret_886; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_f64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \ - float64x2_t __ret_927; \ - float64x2_t __s0_927 = __p0_927; \ - float64x1_t __s2_927 = __p2_927; \ - __ret_927 = vsetq_lane_f64(vget_lane_f64(__s2_927, __p3_927), __s0_927, __p1_927); \ - __ret_927; \ +#define vcopyq_lane_f64(__p0_887, 
__p1_887, __p2_887, __p3_887) __extension__ ({ \ + float64x2_t __ret_887; \ + float64x2_t __s0_887 = __p0_887; \ + float64x1_t __s2_887 = __p2_887; \ + __ret_887 = vsetq_lane_f64(vget_lane_f64(__s2_887, __p3_887), __s0_887, __p1_887); \ + __ret_887; \ }) #else -#define vcopyq_lane_f64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \ - float64x2_t __ret_928; \ - float64x2_t __s0_928 = __p0_928; \ - float64x1_t __s2_928 = __p2_928; \ - float64x2_t __rev0_928; __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \ - __ret_928 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_928, __p3_928), __rev0_928, __p1_928); \ - __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \ - __ret_928; \ +#define vcopyq_lane_f64(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ + float64x2_t __ret_888; \ + float64x2_t __s0_888 = __p0_888; \ + float64x1_t __s2_888 = __p2_888; \ + float64x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ + __ret_888 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_888, __p3_888), __rev0_888, __p1_888); \ + __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ + __ret_888; \ }) #endif -#define vcopy_lane_p64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \ - poly64x1_t __ret_929; \ - poly64x1_t __s0_929 = __p0_929; \ - poly64x1_t __s2_929 = __p2_929; \ - __ret_929 = vset_lane_p64(vget_lane_p64(__s2_929, __p3_929), __s0_929, __p1_929); \ - __ret_929; \ +#define vcopy_lane_p64(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ + poly64x1_t __ret_889; \ + poly64x1_t __s0_889 = __p0_889; \ + poly64x1_t __s2_889 = __p2_889; \ + __ret_889 = vset_lane_p64(vget_lane_p64(__s2_889, __p3_889), __s0_889, __p1_889); \ + __ret_889; \ }) -#define vcopy_lane_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \ - float64x1_t __ret_930; \ - float64x1_t __s0_930 = __p0_930; \ - float64x1_t __s2_930 = __p2_930; \ - __ret_930 = vset_lane_f64(vget_lane_f64(__s2_930, __p3_930), __s0_930, __p1_930); \ - __ret_930; \ +#define vcopy_lane_f64(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ + float64x1_t __ret_890; \ + float64x1_t __s0_890 = __p0_890; \ + float64x1_t __s2_890 = __p2_890; \ + __ret_890 = vset_lane_f64(vget_lane_f64(__s2_890, __p3_890), __s0_890, __p1_890); \ + __ret_890; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \ - poly64x2_t __ret_931; \ - poly64x2_t __s0_931 = __p0_931; \ - poly64x2_t __s2_931 = __p2_931; \ - __ret_931 = vsetq_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \ - __ret_931; \ +#define vcopyq_laneq_p64(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \ + poly64x2_t __ret_891; \ + poly64x2_t __s0_891 = __p0_891; \ + poly64x2_t __s2_891 = __p2_891; \ + __ret_891 = vsetq_lane_p64(vgetq_lane_p64(__s2_891, __p3_891), __s0_891, __p1_891); \ + __ret_891; \ }) #else -#define vcopyq_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \ - poly64x2_t __ret_932; \ - poly64x2_t __s0_932 = __p0_932; \ - poly64x2_t __s2_932 = __p2_932; \ - poly64x2_t __rev0_932; __rev0_932 = __builtin_shufflevector(__s0_932, __s0_932, 1, 0); \ - poly64x2_t __rev2_932; __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \ - __ret_932 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __rev0_932, __p1_932); \ - __ret_932 = __builtin_shufflevector(__ret_932, __ret_932, 1, 0); \ - __ret_932; \ +#define vcopyq_laneq_p64(__p0_892, __p1_892, __p2_892, 
__p3_892) __extension__ ({ \ + poly64x2_t __ret_892; \ + poly64x2_t __s0_892 = __p0_892; \ + poly64x2_t __s2_892 = __p2_892; \ + poly64x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ + poly64x2_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 1, 0); \ + __ret_892 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_892, __p3_892), __rev0_892, __p1_892); \ + __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ + __ret_892; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \ - float64x2_t __ret_933; \ - float64x2_t __s0_933 = __p0_933; \ - float64x2_t __s2_933 = __p2_933; \ - __ret_933 = vsetq_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \ - __ret_933; \ +#define vcopyq_laneq_f64(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ + float64x2_t __ret_893; \ + float64x2_t __s0_893 = __p0_893; \ + float64x2_t __s2_893 = __p2_893; \ + __ret_893 = vsetq_lane_f64(vgetq_lane_f64(__s2_893, __p3_893), __s0_893, __p1_893); \ + __ret_893; \ }) #else -#define vcopyq_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \ - float64x2_t __ret_934; \ - float64x2_t __s0_934 = __p0_934; \ - float64x2_t __s2_934 = __p2_934; \ - float64x2_t __rev0_934; __rev0_934 = __builtin_shufflevector(__s0_934, __s0_934, 1, 0); \ - float64x2_t __rev2_934; __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \ - __ret_934 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __rev0_934, __p1_934); \ - __ret_934 = __builtin_shufflevector(__ret_934, __ret_934, 1, 0); \ - __ret_934; \ +#define vcopyq_laneq_f64(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \ + float64x2_t __ret_894; \ + float64x2_t __s0_894 = __p0_894; \ + float64x2_t __s2_894 = __p2_894; \ + float64x2_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 1, 0); \ + float64x2_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 1, 0); \ + __ret_894 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_894, __p3_894), __rev0_894, __p1_894); \ + __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 1, 0); \ + __ret_894; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p64(__p0_935, __p1_935, __p2_935, __p3_935) __extension__ ({ \ - poly64x1_t __ret_935; \ - poly64x1_t __s0_935 = __p0_935; \ - poly64x2_t __s2_935 = __p2_935; \ - __ret_935 = vset_lane_p64(vgetq_lane_p64(__s2_935, __p3_935), __s0_935, __p1_935); \ - __ret_935; \ +#define vcopy_laneq_p64(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ + poly64x1_t __ret_895; \ + poly64x1_t __s0_895 = __p0_895; \ + poly64x2_t __s2_895 = __p2_895; \ + __ret_895 = vset_lane_p64(vgetq_lane_p64(__s2_895, __p3_895), __s0_895, __p1_895); \ + __ret_895; \ }) #else -#define vcopy_laneq_p64(__p0_936, __p1_936, __p2_936, __p3_936) __extension__ ({ \ - poly64x1_t __ret_936; \ - poly64x1_t __s0_936 = __p0_936; \ - poly64x2_t __s2_936 = __p2_936; \ - poly64x2_t __rev2_936; __rev2_936 = __builtin_shufflevector(__s2_936, __s2_936, 1, 0); \ - __ret_936 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_936, __p3_936), __s0_936, __p1_936); \ - __ret_936; \ +#define vcopy_laneq_p64(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ + poly64x1_t __ret_896; \ + poly64x1_t __s0_896 = __p0_896; \ + poly64x2_t __s2_896 = __p2_896; \ + poly64x2_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 1, 0); \ + __ret_896 = 
vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_896, __p3_896), __s0_896, __p1_896); \ + __ret_896; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_f64(__p0_937, __p1_937, __p2_937, __p3_937) __extension__ ({ \ - float64x1_t __ret_937; \ - float64x1_t __s0_937 = __p0_937; \ - float64x2_t __s2_937 = __p2_937; \ - __ret_937 = vset_lane_f64(vgetq_lane_f64(__s2_937, __p3_937), __s0_937, __p1_937); \ - __ret_937; \ +#define vcopy_laneq_f64(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ + float64x1_t __ret_897; \ + float64x1_t __s0_897 = __p0_897; \ + float64x2_t __s2_897 = __p2_897; \ + __ret_897 = vset_lane_f64(vgetq_lane_f64(__s2_897, __p3_897), __s0_897, __p1_897); \ + __ret_897; \ }) #else -#define vcopy_laneq_f64(__p0_938, __p1_938, __p2_938, __p3_938) __extension__ ({ \ - float64x1_t __ret_938; \ - float64x1_t __s0_938 = __p0_938; \ - float64x2_t __s2_938 = __p2_938; \ - float64x2_t __rev2_938; __rev2_938 = __builtin_shufflevector(__s2_938, __s2_938, 1, 0); \ - __ret_938 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_938, __p3_938), __s0_938, __p1_938); \ - __ret_938; \ +#define vcopy_laneq_f64(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ + float64x1_t __ret_898; \ + float64x1_t __s0_898 = __p0_898; \ + float64x2_t __s2_898 = __p2_898; \ + float64x2_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 1, 0); \ + __ret_898 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_898, __p3_898), __s0_898, __p1_898); \ + __ret_898; \ }) #endif @@ -69600,38 +69008,460 @@ __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { } #endif -#define vmulx_lane_f64(__p0_939, __p1_939, __p2_939) __extension__ ({ \ - float64x1_t __ret_939; \ - float64x1_t __s0_939 = __p0_939; \ - float64x1_t __s1_939 = __p1_939; \ - float64_t __x_939 = vget_lane_f64(__s0_939, 0); \ - float64_t __y_939 = vget_lane_f64(__s1_939, __p2_939); \ - float64_t __z_939 = vmulxd_f64(__x_939, __y_939); \ - __ret_939 = vset_lane_f64(__z_939, __s0_939, __p2_939); \ - __ret_939; \ +#define vmulx_lane_f64(__p0_899, __p1_899, __p2_899) __extension__ ({ \ + float64x1_t __ret_899; \ + float64x1_t __s0_899 = __p0_899; \ + float64x1_t __s1_899 = __p1_899; \ + float64_t __x_899 = vget_lane_f64(__s0_899, 0); \ + float64_t __y_899 = vget_lane_f64(__s1_899, __p2_899); \ + float64_t __z_899 = vmulxd_f64(__x_899, __y_899); \ + __ret_899 = vset_lane_f64(__z_899, __s0_899, __p2_899); \ + __ret_899; \ }) #ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f64(__p0_940, __p1_940, __p2_940) __extension__ ({ \ - float64x1_t __ret_940; \ - float64x1_t __s0_940 = __p0_940; \ - float64x2_t __s1_940 = __p1_940; \ - float64_t __x_940 = vget_lane_f64(__s0_940, 0); \ - float64_t __y_940 = vgetq_lane_f64(__s1_940, __p2_940); \ - float64_t __z_940 = vmulxd_f64(__x_940, __y_940); \ - __ret_940 = vset_lane_f64(__z_940, __s0_940, 0); \ - __ret_940; \ +#define vmulx_laneq_f64(__p0_900, __p1_900, __p2_900) __extension__ ({ \ + float64x1_t __ret_900; \ + float64x1_t __s0_900 = __p0_900; \ + float64x2_t __s1_900 = __p1_900; \ + float64_t __x_900 = vget_lane_f64(__s0_900, 0); \ + float64_t __y_900 = vgetq_lane_f64(__s1_900, __p2_900); \ + float64_t __z_900 = vmulxd_f64(__x_900, __y_900); \ + __ret_900 = vset_lane_f64(__z_900, __s0_900, 0); \ + __ret_900; \ }) #else -#define vmulx_laneq_f64(__p0_941, __p1_941, __p2_941) __extension__ ({ \ - float64x1_t __ret_941; \ - float64x1_t __s0_941 = __p0_941; \ - float64x2_t __s1_941 = __p1_941; \ - float64x2_t __rev1_941; __rev1_941 = __builtin_shufflevector(__s1_941, 
__s1_941, 1, 0); \ - float64_t __x_941 = vget_lane_f64(__s0_941, 0); \ - float64_t __y_941 = __noswap_vgetq_lane_f64(__rev1_941, __p2_941); \ - float64_t __z_941 = vmulxd_f64(__x_941, __y_941); \ - __ret_941 = vset_lane_f64(__z_941, __s0_941, 0); \ - __ret_941; \ +#define vmulx_laneq_f64(__p0_901, __p1_901, __p2_901) __extension__ ({ \ + float64x1_t __ret_901; \ + float64x1_t __s0_901 = __p0_901; \ + float64x2_t __s1_901 = __p1_901; \ + float64x2_t __rev1_901; __rev1_901 = __builtin_shufflevector(__s1_901, __s1_901, 1, 0); \ + float64_t __x_901 = vget_lane_f64(__s0_901, 0); \ + float64_t __y_901 = __noswap_vgetq_lane_f64(__rev1_901, __p2_901); \ + float64_t __z_901 = vmulxd_f64(__x_901, __y_901); \ + __ret_901 = vset_lane_f64(__z_901, __s0_901, 0); \ + __ret_901; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \ + float32x4_t __ret_902; \ + float32x4_t __s0_902 = __p0_902; \ + float16x8_t __s1_902 = __p1_902; \ + float16x4_t __s2_902 = __p2_902; \ + __ret_902 = vfmlalq_high_f16(__s0_902, __s1_902, (float16x8_t) {vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902)}); \ + __ret_902; \ +}) +#else +#define vfmlalq_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \ + float32x4_t __ret_903; \ + float32x4_t __s0_903 = __p0_903; \ + float16x8_t __s1_903 = __p1_903; \ + float16x4_t __s2_903 = __p2_903; \ + float32x4_t __rev0_903; __rev0_903 = __builtin_shufflevector(__s0_903, __s0_903, 3, 2, 1, 0); \ + float16x8_t __rev1_903; __rev1_903 = __builtin_shufflevector(__s1_903, __s1_903, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_903; __rev2_903 = __builtin_shufflevector(__s2_903, __s2_903, 3, 2, 1, 0); \ + __ret_903 = __noswap_vfmlalq_high_f16(__rev0_903, __rev1_903, (float16x8_t) {__noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903)}); \ + __ret_903 = __builtin_shufflevector(__ret_903, __ret_903, 3, 2, 1, 0); \ + __ret_903; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \ + float32x2_t __ret_904; \ + float32x2_t __s0_904 = __p0_904; \ + float16x4_t __s1_904 = __p1_904; \ + float16x4_t __s2_904 = __p2_904; \ + __ret_904 = vfmlal_high_f16(__s0_904, __s1_904, (float16x4_t) {vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904)}); \ + __ret_904; \ +}) +#else +#define vfmlal_lane_high_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ + float32x2_t __ret_905; \ + float32x2_t __s0_905 = __p0_905; \ + float16x4_t __s1_905 = __p1_905; \ + float16x4_t __s2_905 = __p2_905; \ + float32x2_t __rev0_905; __rev0_905 = __builtin_shufflevector(__s0_905, __s0_905, 1, 0); \ + float16x4_t __rev1_905; __rev1_905 = __builtin_shufflevector(__s1_905, __s1_905, 3, 2, 1, 0); \ + float16x4_t __rev2_905; __rev2_905 = __builtin_shufflevector(__s2_905, __s2_905, 3, 2, 1, 0); \ + __ret_905 = 
__noswap_vfmlal_high_f16(__rev0_905, __rev1_905, (float16x4_t) {__noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905)}); \ + __ret_905 = __builtin_shufflevector(__ret_905, __ret_905, 1, 0); \ + __ret_905; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ + float32x4_t __ret_906; \ + float32x4_t __s0_906 = __p0_906; \ + float16x8_t __s1_906 = __p1_906; \ + float16x4_t __s2_906 = __p2_906; \ + __ret_906 = vfmlalq_low_f16(__s0_906, __s1_906, (float16x8_t) {vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906)}); \ + __ret_906; \ +}) +#else +#define vfmlalq_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ + float32x4_t __ret_907; \ + float32x4_t __s0_907 = __p0_907; \ + float16x8_t __s1_907 = __p1_907; \ + float16x4_t __s2_907 = __p2_907; \ + float32x4_t __rev0_907; __rev0_907 = __builtin_shufflevector(__s0_907, __s0_907, 3, 2, 1, 0); \ + float16x8_t __rev1_907; __rev1_907 = __builtin_shufflevector(__s1_907, __s1_907, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_907; __rev2_907 = __builtin_shufflevector(__s2_907, __s2_907, 3, 2, 1, 0); \ + __ret_907 = __noswap_vfmlalq_low_f16(__rev0_907, __rev1_907, (float16x8_t) {__noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907)}); \ + __ret_907 = __builtin_shufflevector(__ret_907, __ret_907, 3, 2, 1, 0); \ + __ret_907; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ + float32x2_t __ret_908; \ + float32x2_t __s0_908 = __p0_908; \ + float16x4_t __s1_908 = __p1_908; \ + float16x4_t __s2_908 = __p2_908; \ + __ret_908 = vfmlal_low_f16(__s0_908, __s1_908, (float16x4_t) {vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908)}); \ + __ret_908; \ +}) +#else +#define vfmlal_lane_low_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ + float32x2_t __ret_909; \ + float32x2_t __s0_909 = __p0_909; \ + float16x4_t __s1_909 = __p1_909; \ + float16x4_t __s2_909 = __p2_909; \ + float32x2_t __rev0_909; __rev0_909 = __builtin_shufflevector(__s0_909, __s0_909, 1, 0); \ + float16x4_t __rev1_909; __rev1_909 = __builtin_shufflevector(__s1_909, __s1_909, 3, 2, 1, 0); \ + float16x4_t __rev2_909; __rev2_909 = __builtin_shufflevector(__s2_909, __s2_909, 3, 2, 1, 0); \ + __ret_909 = __noswap_vfmlal_low_f16(__rev0_909, __rev1_909, (float16x4_t) {__noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909)}); \ + __ret_909 = __builtin_shufflevector(__ret_909, __ret_909, 1, 0); \ + __ret_909; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ + 
float32x4_t __ret_910; \ + float32x4_t __s0_910 = __p0_910; \ + float16x8_t __s1_910 = __p1_910; \ + float16x8_t __s2_910 = __p2_910; \ + __ret_910 = vfmlalq_high_f16(__s0_910, __s1_910, (float16x8_t) {vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910)}); \ + __ret_910; \ +}) +#else +#define vfmlalq_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ + float32x4_t __ret_911; \ + float32x4_t __s0_911 = __p0_911; \ + float16x8_t __s1_911 = __p1_911; \ + float16x8_t __s2_911 = __p2_911; \ + float32x4_t __rev0_911; __rev0_911 = __builtin_shufflevector(__s0_911, __s0_911, 3, 2, 1, 0); \ + float16x8_t __rev1_911; __rev1_911 = __builtin_shufflevector(__s1_911, __s1_911, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_911; __rev2_911 = __builtin_shufflevector(__s2_911, __s2_911, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_911 = __noswap_vfmlalq_high_f16(__rev0_911, __rev1_911, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911)}); \ + __ret_911 = __builtin_shufflevector(__ret_911, __ret_911, 3, 2, 1, 0); \ + __ret_911; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ + float32x2_t __ret_912; \ + float32x2_t __s0_912 = __p0_912; \ + float16x4_t __s1_912 = __p1_912; \ + float16x8_t __s2_912 = __p2_912; \ + __ret_912 = vfmlal_high_f16(__s0_912, __s1_912, (float16x4_t) {vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912)}); \ + __ret_912; \ +}) +#else +#define vfmlal_laneq_high_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \ + float32x2_t __ret_913; \ + float32x2_t __s0_913 = __p0_913; \ + float16x4_t __s1_913 = __p1_913; \ + float16x8_t __s2_913 = __p2_913; \ + float32x2_t __rev0_913; __rev0_913 = __builtin_shufflevector(__s0_913, __s0_913, 1, 0); \ + float16x4_t __rev1_913; __rev1_913 = __builtin_shufflevector(__s1_913, __s1_913, 3, 2, 1, 0); \ + float16x8_t __rev2_913; __rev2_913 = __builtin_shufflevector(__s2_913, __s2_913, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_913 = __noswap_vfmlal_high_f16(__rev0_913, __rev1_913, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913)}); \ + __ret_913 = __builtin_shufflevector(__ret_913, __ret_913, 1, 0); \ + __ret_913; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ + float32x4_t __ret_914; \ + float32x4_t __s0_914 = __p0_914; \ + float16x8_t __s1_914 = __p1_914; \ + float16x8_t __s2_914 = __p2_914; \ + __ret_914 = vfmlalq_low_f16(__s0_914, __s1_914, (float16x8_t) {vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), 
vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914)}); \ + __ret_914; \ +}) +#else +#define vfmlalq_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ + float32x4_t __ret_915; \ + float32x4_t __s0_915 = __p0_915; \ + float16x8_t __s1_915 = __p1_915; \ + float16x8_t __s2_915 = __p2_915; \ + float32x4_t __rev0_915; __rev0_915 = __builtin_shufflevector(__s0_915, __s0_915, 3, 2, 1, 0); \ + float16x8_t __rev1_915; __rev1_915 = __builtin_shufflevector(__s1_915, __s1_915, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_915; __rev2_915 = __builtin_shufflevector(__s2_915, __s2_915, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_915 = __noswap_vfmlalq_low_f16(__rev0_915, __rev1_915, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915)}); \ + __ret_915 = __builtin_shufflevector(__ret_915, __ret_915, 3, 2, 1, 0); \ + __ret_915; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ + float32x2_t __ret_916; \ + float32x2_t __s0_916 = __p0_916; \ + float16x4_t __s1_916 = __p1_916; \ + float16x8_t __s2_916 = __p2_916; \ + __ret_916 = vfmlal_low_f16(__s0_916, __s1_916, (float16x4_t) {vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916)}); \ + __ret_916; \ +}) +#else +#define vfmlal_laneq_low_f16(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \ + float32x2_t __ret_917; \ + float32x2_t __s0_917 = __p0_917; \ + float16x4_t __s1_917 = __p1_917; \ + float16x8_t __s2_917 = __p2_917; \ + float32x2_t __rev0_917; __rev0_917 = __builtin_shufflevector(__s0_917, __s0_917, 1, 0); \ + float16x4_t __rev1_917; __rev1_917 = __builtin_shufflevector(__s1_917, __s1_917, 3, 2, 1, 0); \ + float16x8_t __rev2_917; __rev2_917 = __builtin_shufflevector(__s2_917, __s2_917, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_917 = __noswap_vfmlal_low_f16(__rev0_917, __rev1_917, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917)}); \ + __ret_917 = __builtin_shufflevector(__ret_917, __ret_917, 1, 0); \ + __ret_917; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_high_f16(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \ + float32x4_t __ret_918; \ + float32x4_t __s0_918 = __p0_918; \ + float16x8_t __s1_918 = __p1_918; \ + float16x4_t __s2_918 = __p2_918; \ + __ret_918 = vfmlslq_high_f16(__s0_918, __s1_918, (float16x8_t) {vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918)}); \ + __ret_918; \ +}) +#else +#define vfmlslq_lane_high_f16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \ + float32x4_t __ret_919; \ + float32x4_t __s0_919 = __p0_919; \ + float16x8_t __s1_919 = __p1_919; \ + float16x4_t __s2_919 = __p2_919; \ + float32x4_t __rev0_919; __rev0_919 = __builtin_shufflevector(__s0_919, __s0_919, 3, 2, 1, 0); \ + 
float16x8_t __rev1_919; __rev1_919 = __builtin_shufflevector(__s1_919, __s1_919, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_919; __rev2_919 = __builtin_shufflevector(__s2_919, __s2_919, 3, 2, 1, 0); \ + __ret_919 = __noswap_vfmlslq_high_f16(__rev0_919, __rev1_919, (float16x8_t) {__noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919)}); \ + __ret_919 = __builtin_shufflevector(__ret_919, __ret_919, 3, 2, 1, 0); \ + __ret_919; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_high_f16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \ + float32x2_t __ret_920; \ + float32x2_t __s0_920 = __p0_920; \ + float16x4_t __s1_920 = __p1_920; \ + float16x4_t __s2_920 = __p2_920; \ + __ret_920 = vfmlsl_high_f16(__s0_920, __s1_920, (float16x4_t) {vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920)}); \ + __ret_920; \ +}) +#else +#define vfmlsl_lane_high_f16(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ + float32x2_t __ret_921; \ + float32x2_t __s0_921 = __p0_921; \ + float16x4_t __s1_921 = __p1_921; \ + float16x4_t __s2_921 = __p2_921; \ + float32x2_t __rev0_921; __rev0_921 = __builtin_shufflevector(__s0_921, __s0_921, 1, 0); \ + float16x4_t __rev1_921; __rev1_921 = __builtin_shufflevector(__s1_921, __s1_921, 3, 2, 1, 0); \ + float16x4_t __rev2_921; __rev2_921 = __builtin_shufflevector(__s2_921, __s2_921, 3, 2, 1, 0); \ + __ret_921 = __noswap_vfmlsl_high_f16(__rev0_921, __rev1_921, (float16x4_t) {__noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921)}); \ + __ret_921 = __builtin_shufflevector(__ret_921, __ret_921, 1, 0); \ + __ret_921; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_low_f16(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ + float32x4_t __ret_922; \ + float32x4_t __s0_922 = __p0_922; \ + float16x8_t __s1_922 = __p1_922; \ + float16x4_t __s2_922 = __p2_922; \ + __ret_922 = vfmlslq_low_f16(__s0_922, __s1_922, (float16x8_t) {vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922)}); \ + __ret_922; \ +}) +#else +#define vfmlslq_lane_low_f16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ + float32x4_t __ret_923; \ + float32x4_t __s0_923 = __p0_923; \ + float16x8_t __s1_923 = __p1_923; \ + float16x4_t __s2_923 = __p2_923; \ + float32x4_t __rev0_923; __rev0_923 = __builtin_shufflevector(__s0_923, __s0_923, 3, 2, 1, 0); \ + float16x8_t __rev1_923; __rev1_923 = __builtin_shufflevector(__s1_923, __s1_923, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_923; __rev2_923 = __builtin_shufflevector(__s2_923, __s2_923, 3, 2, 1, 0); \ + __ret_923 = __noswap_vfmlslq_low_f16(__rev0_923, __rev1_923, (float16x8_t) {__noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), 
__noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923)}); \ + __ret_923 = __builtin_shufflevector(__ret_923, __ret_923, 3, 2, 1, 0); \ + __ret_923; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_low_f16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \ + float32x2_t __ret_924; \ + float32x2_t __s0_924 = __p0_924; \ + float16x4_t __s1_924 = __p1_924; \ + float16x4_t __s2_924 = __p2_924; \ + __ret_924 = vfmlsl_low_f16(__s0_924, __s1_924, (float16x4_t) {vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924)}); \ + __ret_924; \ +}) +#else +#define vfmlsl_lane_low_f16(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \ + float32x2_t __ret_925; \ + float32x2_t __s0_925 = __p0_925; \ + float16x4_t __s1_925 = __p1_925; \ + float16x4_t __s2_925 = __p2_925; \ + float32x2_t __rev0_925; __rev0_925 = __builtin_shufflevector(__s0_925, __s0_925, 1, 0); \ + float16x4_t __rev1_925; __rev1_925 = __builtin_shufflevector(__s1_925, __s1_925, 3, 2, 1, 0); \ + float16x4_t __rev2_925; __rev2_925 = __builtin_shufflevector(__s2_925, __s2_925, 3, 2, 1, 0); \ + __ret_925 = __noswap_vfmlsl_low_f16(__rev0_925, __rev1_925, (float16x4_t) {__noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925)}); \ + __ret_925 = __builtin_shufflevector(__ret_925, __ret_925, 1, 0); \ + __ret_925; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_high_f16(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \ + float32x4_t __ret_926; \ + float32x4_t __s0_926 = __p0_926; \ + float16x8_t __s1_926 = __p1_926; \ + float16x8_t __s2_926 = __p2_926; \ + __ret_926 = vfmlslq_high_f16(__s0_926, __s1_926, (float16x8_t) {vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926)}); \ + __ret_926; \ +}) +#else +#define vfmlslq_laneq_high_f16(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \ + float32x4_t __ret_927; \ + float32x4_t __s0_927 = __p0_927; \ + float16x8_t __s1_927 = __p1_927; \ + float16x8_t __s2_927 = __p2_927; \ + float32x4_t __rev0_927; __rev0_927 = __builtin_shufflevector(__s0_927, __s0_927, 3, 2, 1, 0); \ + float16x8_t __rev1_927; __rev1_927 = __builtin_shufflevector(__s1_927, __s1_927, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_927; __rev2_927 = __builtin_shufflevector(__s2_927, __s2_927, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_927 = __noswap_vfmlslq_high_f16(__rev0_927, __rev1_927, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927)}); \ + __ret_927 = __builtin_shufflevector(__ret_927, __ret_927, 3, 2, 1, 0); \ + __ret_927; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_high_f16(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \ + float32x2_t __ret_928; \ + 
float32x2_t __s0_928 = __p0_928; \ + float16x4_t __s1_928 = __p1_928; \ + float16x8_t __s2_928 = __p2_928; \ + __ret_928 = vfmlsl_high_f16(__s0_928, __s1_928, (float16x4_t) {vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928)}); \ + __ret_928; \ +}) +#else +#define vfmlsl_laneq_high_f16(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \ + float32x2_t __ret_929; \ + float32x2_t __s0_929 = __p0_929; \ + float16x4_t __s1_929 = __p1_929; \ + float16x8_t __s2_929 = __p2_929; \ + float32x2_t __rev0_929; __rev0_929 = __builtin_shufflevector(__s0_929, __s0_929, 1, 0); \ + float16x4_t __rev1_929; __rev1_929 = __builtin_shufflevector(__s1_929, __s1_929, 3, 2, 1, 0); \ + float16x8_t __rev2_929; __rev2_929 = __builtin_shufflevector(__s2_929, __s2_929, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_929 = __noswap_vfmlsl_high_f16(__rev0_929, __rev1_929, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929)}); \ + __ret_929 = __builtin_shufflevector(__ret_929, __ret_929, 1, 0); \ + __ret_929; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_low_f16(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \ + float32x4_t __ret_930; \ + float32x4_t __s0_930 = __p0_930; \ + float16x8_t __s1_930 = __p1_930; \ + float16x8_t __s2_930 = __p2_930; \ + __ret_930 = vfmlslq_low_f16(__s0_930, __s1_930, (float16x8_t) {vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930)}); \ + __ret_930; \ +}) +#else +#define vfmlslq_laneq_low_f16(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \ + float32x4_t __ret_931; \ + float32x4_t __s0_931 = __p0_931; \ + float16x8_t __s1_931 = __p1_931; \ + float16x8_t __s2_931 = __p2_931; \ + float32x4_t __rev0_931; __rev0_931 = __builtin_shufflevector(__s0_931, __s0_931, 3, 2, 1, 0); \ + float16x8_t __rev1_931; __rev1_931 = __builtin_shufflevector(__s1_931, __s1_931, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_931; __rev2_931 = __builtin_shufflevector(__s2_931, __s2_931, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_931 = __noswap_vfmlslq_low_f16(__rev0_931, __rev1_931, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931)}); \ + __ret_931 = __builtin_shufflevector(__ret_931, __ret_931, 3, 2, 1, 0); \ + __ret_931; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_low_f16(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \ + float32x2_t __ret_932; \ + float32x2_t __s0_932 = __p0_932; \ + float16x4_t __s1_932 = __p1_932; \ + float16x8_t __s2_932 = __p2_932; \ + __ret_932 = vfmlsl_low_f16(__s0_932, __s1_932, (float16x4_t) {vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932)}); \ + __ret_932; \ +}) +#else +#define vfmlsl_laneq_low_f16(__p0_933, __p1_933, __p2_933, __p3_933) 
__extension__ ({ \ + float32x2_t __ret_933; \ + float32x2_t __s0_933 = __p0_933; \ + float16x4_t __s1_933 = __p1_933; \ + float16x8_t __s2_933 = __p2_933; \ + float32x2_t __rev0_933; __rev0_933 = __builtin_shufflevector(__s0_933, __s0_933, 1, 0); \ + float16x4_t __rev1_933; __rev1_933 = __builtin_shufflevector(__s1_933, __s1_933, 3, 2, 1, 0); \ + float16x8_t __rev2_933; __rev2_933 = __builtin_shufflevector(__s2_933, __s2_933, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_933 = __noswap_vfmlsl_low_f16(__rev0_933, __rev1_933, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933)}); \ + __ret_933 = __builtin_shufflevector(__ret_933, __ret_933, 1, 0); \ + __ret_933; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_lane_f16(__p0_934, __p1_934, __p2_934) __extension__ ({ \ + float16_t __ret_934; \ + float16_t __s0_934 = __p0_934; \ + float16x4_t __s1_934 = __p1_934; \ + __ret_934 = __s0_934 * vget_lane_f16(__s1_934, __p2_934); \ + __ret_934; \ +}) +#else +#define vmulh_lane_f16(__p0_935, __p1_935, __p2_935) __extension__ ({ \ + float16_t __ret_935; \ + float16_t __s0_935 = __p0_935; \ + float16x4_t __s1_935 = __p1_935; \ + float16x4_t __rev1_935; __rev1_935 = __builtin_shufflevector(__s1_935, __s1_935, 3, 2, 1, 0); \ + __ret_935 = __s0_935 * __noswap_vget_lane_f16(__rev1_935, __p2_935); \ + __ret_935; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_laneq_f16(__p0_936, __p1_936, __p2_936) __extension__ ({ \ + float16_t __ret_936; \ + float16_t __s0_936 = __p0_936; \ + float16x8_t __s1_936 = __p1_936; \ + __ret_936 = __s0_936 * vgetq_lane_f16(__s1_936, __p2_936); \ + __ret_936; \ +}) +#else +#define vmulh_laneq_f16(__p0_937, __p1_937, __p2_937) __extension__ ({ \ + float16_t __ret_937; \ + float16_t __s0_937 = __p0_937; \ + float16x8_t __s1_937 = __p1_937; \ + float16x8_t __rev1_937; __rev1_937 = __builtin_shufflevector(__s1_937, __s1_937, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_937 = __s0_937 * __noswap_vgetq_lane_f16(__rev1_937, __p2_937); \ + __ret_937; \ }) #endif diff --git a/lib/include/arm_neon_sve_bridge.h b/lib/include/arm_neon_sve_bridge.h new file mode 100644 index 0000000000..a9fbdbaf4b --- /dev/null +++ b/lib/include/arm_neon_sve_bridge.h @@ -0,0 +1,182 @@ +/*===---- arm_neon_sve_bridge.h - ARM NEON SVE Bridge intrinsics -----------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_NEON_SVE_BRIDGE_H +#define __ARM_NEON_SVE_BRIDGE_H + +#include <arm_neon.h> +#include <arm_sve.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* Function attributes */ +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) +#define __aio \ + static __inline__ \ + __attribute__((__always_inline__, __nodebug__, __overloadable__)) + +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8))) +svint8_t svset_neonq(svint8_t, int8x16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16))) +svint16_t svset_neonq(svint16_t, int16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32))) +svint32_t svset_neonq(svint32_t, int32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64))) +svint64_t svset_neonq(svint64_t, int64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8))) +svuint8_t svset_neonq(svuint8_t, uint8x16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16))) +svuint16_t svset_neonq(svuint16_t, uint16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32))) +svuint32_t svset_neonq(svuint32_t, uint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64))) +svuint64_t svset_neonq(svuint64_t, uint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16))) +svfloat16_t svset_neonq(svfloat16_t, float16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32))) +svfloat32_t svset_neonq(svfloat32_t, float32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64))) +svfloat64_t svset_neonq(svfloat64_t, float64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8))) +svint8_t svset_neonq_s8(svint8_t, int8x16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16))) +svint16_t svset_neonq_s16(svint16_t, int16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32))) +svint32_t svset_neonq_s32(svint32_t, int32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64))) +svint64_t svset_neonq_s64(svint64_t, int64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8))) +svuint8_t svset_neonq_u8(svuint8_t, uint8x16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16))) +svuint16_t svset_neonq_u16(svuint16_t, uint16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32))) +svuint32_t svset_neonq_u32(svuint32_t, uint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64))) +svuint64_t svset_neonq_u64(svuint64_t, uint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16))) +svfloat16_t svset_neonq_f16(svfloat16_t, float16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32))) +svfloat32_t svset_neonq_f32(svfloat32_t, float32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64))) +svfloat64_t svset_neonq_f64(svfloat64_t, float64x2_t); + +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8))) +int8x16_t svget_neonq(svint8_t); +__aio
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16))) +int16x8_t svget_neonq(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32))) +int32x4_t svget_neonq(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64))) +int64x2_t svget_neonq(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8))) +uint8x16_t svget_neonq(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16))) +uint16x8_t svget_neonq(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32))) +uint32x4_t svget_neonq(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64))) +uint64x2_t svget_neonq(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16))) +float16x8_t svget_neonq(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32))) +float32x4_t svget_neonq(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64))) +float64x2_t svget_neonq(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8))) +int8x16_t svget_neonq_s8(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16))) +int16x8_t svget_neonq_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32))) +int32x4_t svget_neonq_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64))) +int64x2_t svget_neonq_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8))) +uint8x16_t svget_neonq_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16))) +uint16x8_t svget_neonq_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32))) +uint32x4_t svget_neonq_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64))) +uint64x2_t svget_neonq_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16))) +float16x8_t svget_neonq_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32))) +float32x4_t svget_neonq_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64))) +float64x2_t svget_neonq_f64(svfloat64_t); + +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8))) +svint8_t svdup_neonq(int8x16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16))) +svint16_t svdup_neonq(int16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32))) +svint32_t svdup_neonq(int32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64))) +svint64_t svdup_neonq(int64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8))) +svuint8_t svdup_neonq(uint8x16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16))) +svuint16_t svdup_neonq(uint16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32))) +svuint32_t svdup_neonq(uint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64))) +svuint64_t svdup_neonq(uint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16))) +svfloat16_t svdup_neonq(float16x8_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32))) +svfloat32_t svdup_neonq(float32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64))) +svfloat64_t svdup_neonq(float64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8))) +svint8_t svdup_neonq_s8(int8x16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16))) +svint16_t svdup_neonq_s16(int16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32))) +svint32_t svdup_neonq_s32(int32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64))) +svint64_t svdup_neonq_s64(int64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8))) +svuint8_t svdup_neonq_u8(uint8x16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16))) +svuint16_t svdup_neonq_u16(uint16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32))) +svuint32_t svdup_neonq_u32(uint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64))) +svuint64_t svdup_neonq_u64(uint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16))) +svfloat16_t svdup_neonq_f16(float16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32))) +svfloat32_t svdup_neonq_f32(float32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64))) +svfloat64_t svdup_neonq_f64(float64x2_t); + +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16))) +svbfloat16_t svset_neonq(svbfloat16_t, bfloat16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16))) +svbfloat16_t svset_neonq_bf16(svbfloat16_t, bfloat16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16))) +bfloat16x8_t svget_neonq(svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16))) +bfloat16x8_t svget_neonq_bf16(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16))) +svbfloat16_t svdup_neonq(bfloat16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16))) +svbfloat16_t svdup_neonq_bf16(bfloat16x8_t); + +#undef __ai +#undef __aio + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif //__ARM_NEON_SVE_BRIDGE_H diff --git a/lib/include/arm_sve.h b/lib/include/arm_sve.h index 03dc3ffdee..64362b8563 100644 --- a/lib/include/arm_sve.h +++ b/lib/include/arm_sve.h @@ -11,10 +11,6 @@ #ifndef __ARM_SVE_H #define __ARM_SVE_H -#if !defined(__ARM_FEATURE_SVE) -#error "SVE support not enabled" -#else - #if !defined(__LITTLE_ENDIAN__) #error "Big endian is currently not supported for arm_sve.h" #endif @@ -39,19 +35,9 @@ typedef __SVUint32_t svuint32_t; typedef __SVUint64_t svuint64_t; typedef __SVFloat16_t svfloat16_t; -#if defined(__ARM_FEATURE_SVE_BF16) && !defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC) -#error "__ARM_FEATURE_BF16_SCALAR_ARITHMETIC must be defined when __ARM_FEATURE_SVE_BF16 is defined" -#endif - -#if defined(__ARM_FEATURE_SVE_BF16) typedef __SVBFloat16_t svbfloat16_t; -#endif - -#if defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC) #include typedef __bf16 bfloat16_t; -#endif - typedef __SVFloat32_t svfloat32_t; typedef __SVFloat64_t svfloat64_t; typedef __clang_svint8x2_t svint8x2_t; @@ -89,11 +75,9 @@ typedef __clang_svfloat32x4_t svfloat32x4_t; typedef __clang_svfloat64x4_t 
svfloat64x4_t; typedef __SVBool_t svbool_t; -#ifdef __ARM_FEATURE_SVE_BF16 typedef __clang_svbfloat16x2_t svbfloat16x2_t; typedef __clang_svbfloat16x3_t svbfloat16x3_t; typedef __clang_svbfloat16x4_t svbfloat16x4_t; -#endif enum svpattern { SV_POW2 = 0, @@ -145,9 +129,7 @@ enum svprfop #define svreinterpret_s8_u32(...) __builtin_sve_reinterpret_s8_u32(__VA_ARGS__) #define svreinterpret_s8_u64(...) __builtin_sve_reinterpret_s8_u64(__VA_ARGS__) #define svreinterpret_s8_f16(...) __builtin_sve_reinterpret_s8_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_s8_bf16(...) __builtin_sve_reinterpret_s8_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_s8_f32(...) __builtin_sve_reinterpret_s8_f32(__VA_ARGS__) #define svreinterpret_s8_f64(...) __builtin_sve_reinterpret_s8_f64(__VA_ARGS__) #define svreinterpret_s16_s8(...) __builtin_sve_reinterpret_s16_s8(__VA_ARGS__) @@ -159,9 +141,7 @@ enum svprfop #define svreinterpret_s16_u32(...) __builtin_sve_reinterpret_s16_u32(__VA_ARGS__) #define svreinterpret_s16_u64(...) __builtin_sve_reinterpret_s16_u64(__VA_ARGS__) #define svreinterpret_s16_f16(...) __builtin_sve_reinterpret_s16_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_s16_bf16(...) __builtin_sve_reinterpret_s16_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_s16_f32(...) __builtin_sve_reinterpret_s16_f32(__VA_ARGS__) #define svreinterpret_s16_f64(...) __builtin_sve_reinterpret_s16_f64(__VA_ARGS__) #define svreinterpret_s32_s8(...) __builtin_sve_reinterpret_s32_s8(__VA_ARGS__) @@ -173,9 +153,7 @@ enum svprfop #define svreinterpret_s32_u32(...) __builtin_sve_reinterpret_s32_u32(__VA_ARGS__) #define svreinterpret_s32_u64(...) __builtin_sve_reinterpret_s32_u64(__VA_ARGS__) #define svreinterpret_s32_f16(...) __builtin_sve_reinterpret_s32_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_s32_bf16(...) __builtin_sve_reinterpret_s32_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_s32_f32(...) __builtin_sve_reinterpret_s32_f32(__VA_ARGS__) #define svreinterpret_s32_f64(...) __builtin_sve_reinterpret_s32_f64(__VA_ARGS__) #define svreinterpret_s64_s8(...) __builtin_sve_reinterpret_s64_s8(__VA_ARGS__) @@ -187,9 +165,7 @@ enum svprfop #define svreinterpret_s64_u32(...) __builtin_sve_reinterpret_s64_u32(__VA_ARGS__) #define svreinterpret_s64_u64(...) __builtin_sve_reinterpret_s64_u64(__VA_ARGS__) #define svreinterpret_s64_f16(...) __builtin_sve_reinterpret_s64_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_s64_bf16(...) __builtin_sve_reinterpret_s64_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_s64_f32(...) __builtin_sve_reinterpret_s64_f32(__VA_ARGS__) #define svreinterpret_s64_f64(...) __builtin_sve_reinterpret_s64_f64(__VA_ARGS__) #define svreinterpret_u8_s8(...) __builtin_sve_reinterpret_u8_s8(__VA_ARGS__) @@ -201,9 +177,7 @@ enum svprfop #define svreinterpret_u8_u32(...) __builtin_sve_reinterpret_u8_u32(__VA_ARGS__) #define svreinterpret_u8_u64(...) __builtin_sve_reinterpret_u8_u64(__VA_ARGS__) #define svreinterpret_u8_f16(...) __builtin_sve_reinterpret_u8_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_u8_bf16(...) __builtin_sve_reinterpret_u8_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_u8_f32(...) 
__builtin_sve_reinterpret_u8_f32(__VA_ARGS__) #define svreinterpret_u8_f64(...) __builtin_sve_reinterpret_u8_f64(__VA_ARGS__) #define svreinterpret_u16_s8(...) __builtin_sve_reinterpret_u16_s8(__VA_ARGS__) @@ -215,9 +189,7 @@ enum svprfop #define svreinterpret_u16_u32(...) __builtin_sve_reinterpret_u16_u32(__VA_ARGS__) #define svreinterpret_u16_u64(...) __builtin_sve_reinterpret_u16_u64(__VA_ARGS__) #define svreinterpret_u16_f16(...) __builtin_sve_reinterpret_u16_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_u16_bf16(...) __builtin_sve_reinterpret_u16_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_u16_f32(...) __builtin_sve_reinterpret_u16_f32(__VA_ARGS__) #define svreinterpret_u16_f64(...) __builtin_sve_reinterpret_u16_f64(__VA_ARGS__) #define svreinterpret_u32_s8(...) __builtin_sve_reinterpret_u32_s8(__VA_ARGS__) @@ -229,9 +201,7 @@ enum svprfop #define svreinterpret_u32_u32(...) __builtin_sve_reinterpret_u32_u32(__VA_ARGS__) #define svreinterpret_u32_u64(...) __builtin_sve_reinterpret_u32_u64(__VA_ARGS__) #define svreinterpret_u32_f16(...) __builtin_sve_reinterpret_u32_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_u32_bf16(...) __builtin_sve_reinterpret_u32_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_u32_f32(...) __builtin_sve_reinterpret_u32_f32(__VA_ARGS__) #define svreinterpret_u32_f64(...) __builtin_sve_reinterpret_u32_f64(__VA_ARGS__) #define svreinterpret_u64_s8(...) __builtin_sve_reinterpret_u64_s8(__VA_ARGS__) @@ -243,9 +213,7 @@ enum svprfop #define svreinterpret_u64_u32(...) __builtin_sve_reinterpret_u64_u32(__VA_ARGS__) #define svreinterpret_u64_u64(...) __builtin_sve_reinterpret_u64_u64(__VA_ARGS__) #define svreinterpret_u64_f16(...) __builtin_sve_reinterpret_u64_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_u64_bf16(...) __builtin_sve_reinterpret_u64_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_u64_f32(...) __builtin_sve_reinterpret_u64_f32(__VA_ARGS__) #define svreinterpret_u64_f64(...) __builtin_sve_reinterpret_u64_f64(__VA_ARGS__) #define svreinterpret_f16_s8(...) __builtin_sve_reinterpret_f16_s8(__VA_ARGS__) @@ -257,47 +225,21 @@ enum svprfop #define svreinterpret_f16_u32(...) __builtin_sve_reinterpret_f16_u32(__VA_ARGS__) #define svreinterpret_f16_u64(...) __builtin_sve_reinterpret_f16_u64(__VA_ARGS__) #define svreinterpret_f16_f16(...) __builtin_sve_reinterpret_f16_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_f16_bf16(...) __builtin_sve_reinterpret_f16_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_f16_f32(...) __builtin_sve_reinterpret_f16_f32(__VA_ARGS__) #define svreinterpret_f16_f64(...) __builtin_sve_reinterpret_f16_f64(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_s8(...) __builtin_sve_reinterpret_bf16_s8(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_s16(...) __builtin_sve_reinterpret_bf16_s16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_s32(...) __builtin_sve_reinterpret_bf16_s32(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_s64(...) 
__builtin_sve_reinterpret_bf16_s64(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_u8(...) __builtin_sve_reinterpret_bf16_u8(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_u16(...) __builtin_sve_reinterpret_bf16_u16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_u32(...) __builtin_sve_reinterpret_bf16_u32(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_u64(...) __builtin_sve_reinterpret_bf16_u64(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_f16(...) __builtin_sve_reinterpret_bf16_f16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_bf16(...) __builtin_sve_reinterpret_bf16_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_f32(...) __builtin_sve_reinterpret_bf16_f32(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_bf16_f64(...) __builtin_sve_reinterpret_bf16_f64(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_f32_s8(...) __builtin_sve_reinterpret_f32_s8(__VA_ARGS__) #define svreinterpret_f32_s16(...) __builtin_sve_reinterpret_f32_s16(__VA_ARGS__) #define svreinterpret_f32_s32(...) __builtin_sve_reinterpret_f32_s32(__VA_ARGS__) @@ -307,9 +249,7 @@ enum svprfop #define svreinterpret_f32_u32(...) __builtin_sve_reinterpret_f32_u32(__VA_ARGS__) #define svreinterpret_f32_u64(...) __builtin_sve_reinterpret_f32_u64(__VA_ARGS__) #define svreinterpret_f32_f16(...) __builtin_sve_reinterpret_f32_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_f32_bf16(...) __builtin_sve_reinterpret_f32_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_f32_f32(...) __builtin_sve_reinterpret_f32_f32(__VA_ARGS__) #define svreinterpret_f32_f64(...) __builtin_sve_reinterpret_f32_f64(__VA_ARGS__) #define svreinterpret_f64_s8(...) __builtin_sve_reinterpret_f64_s8(__VA_ARGS__) @@ -321,630 +261,582 @@ enum svprfop #define svreinterpret_f64_u32(...) __builtin_sve_reinterpret_f64_u32(__VA_ARGS__) #define svreinterpret_f64_u64(...) __builtin_sve_reinterpret_f64_u64(__VA_ARGS__) #define svreinterpret_f64_f16(...) __builtin_sve_reinterpret_f64_f16(__VA_ARGS__) -#if defined(__ARM_FEATURE_SVE_BF16) #define svreinterpret_f64_bf16(...) __builtin_sve_reinterpret_f64_bf16(__VA_ARGS__) -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ #define svreinterpret_f64_f32(...) __builtin_sve_reinterpret_f64_f32(__VA_ARGS__) #define svreinterpret_f64_f64(...) 
__builtin_sve_reinterpret_f64_f64(__VA_ARGS__) -__aio svint8_t svreinterpret_s8(svint8_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint8_t op) { return __builtin_sve_reinterpret_s8_s8(op); } -__aio svint8_t svreinterpret_s8(svint16_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint16_t op) { return __builtin_sve_reinterpret_s8_s16(op); } -__aio svint8_t svreinterpret_s8(svint32_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint32_t op) { return __builtin_sve_reinterpret_s8_s32(op); } -__aio svint8_t svreinterpret_s8(svint64_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint64_t op) { return __builtin_sve_reinterpret_s8_s64(op); } -__aio svint8_t svreinterpret_s8(svuint8_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint8_t op) { return __builtin_sve_reinterpret_s8_u8(op); } -__aio svint8_t svreinterpret_s8(svuint16_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint16_t op) { return __builtin_sve_reinterpret_s8_u16(op); } -__aio svint8_t svreinterpret_s8(svuint32_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint32_t op) { return __builtin_sve_reinterpret_s8_u32(op); } -__aio svint8_t svreinterpret_s8(svuint64_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint64_t op) { return __builtin_sve_reinterpret_s8_u64(op); } -__aio svint8_t svreinterpret_s8(svfloat16_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svfloat16_t op) { return __builtin_sve_reinterpret_s8_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svint8_t svreinterpret_s8(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svbfloat16_t op) { return __builtin_sve_reinterpret_s8_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svint8_t svreinterpret_s8(svfloat32_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svfloat32_t op) { return __builtin_sve_reinterpret_s8_f32(op); } -__aio svint8_t svreinterpret_s8(svfloat64_t op) { +__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svfloat64_t op) { return __builtin_sve_reinterpret_s8_f64(op); } -__aio svint16_t svreinterpret_s16(svint8_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint8_t op) { return __builtin_sve_reinterpret_s16_s8(op); } -__aio svint16_t svreinterpret_s16(svint16_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint16_t op) { return __builtin_sve_reinterpret_s16_s16(op); } -__aio svint16_t svreinterpret_s16(svint32_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint32_t op) { return __builtin_sve_reinterpret_s16_s32(op); } -__aio svint16_t svreinterpret_s16(svint64_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint64_t op) { return __builtin_sve_reinterpret_s16_s64(op); } -__aio svint16_t svreinterpret_s16(svuint8_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint8_t op) { return __builtin_sve_reinterpret_s16_u8(op); } -__aio svint16_t svreinterpret_s16(svuint16_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint16_t op) { return __builtin_sve_reinterpret_s16_u16(op); } -__aio svint16_t svreinterpret_s16(svuint32_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint32_t op) { return __builtin_sve_reinterpret_s16_u32(op); } -__aio svint16_t 
svreinterpret_s16(svuint64_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint64_t op) { return __builtin_sve_reinterpret_s16_u64(op); } -__aio svint16_t svreinterpret_s16(svfloat16_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svfloat16_t op) { return __builtin_sve_reinterpret_s16_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svint16_t svreinterpret_s16(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svbfloat16_t op) { return __builtin_sve_reinterpret_s16_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svint16_t svreinterpret_s16(svfloat32_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svfloat32_t op) { return __builtin_sve_reinterpret_s16_f32(op); } -__aio svint16_t svreinterpret_s16(svfloat64_t op) { +__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svfloat64_t op) { return __builtin_sve_reinterpret_s16_f64(op); } -__aio svint32_t svreinterpret_s32(svint8_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint8_t op) { return __builtin_sve_reinterpret_s32_s8(op); } -__aio svint32_t svreinterpret_s32(svint16_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint16_t op) { return __builtin_sve_reinterpret_s32_s16(op); } -__aio svint32_t svreinterpret_s32(svint32_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint32_t op) { return __builtin_sve_reinterpret_s32_s32(op); } -__aio svint32_t svreinterpret_s32(svint64_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint64_t op) { return __builtin_sve_reinterpret_s32_s64(op); } -__aio svint32_t svreinterpret_s32(svuint8_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint8_t op) { return __builtin_sve_reinterpret_s32_u8(op); } -__aio svint32_t svreinterpret_s32(svuint16_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint16_t op) { return __builtin_sve_reinterpret_s32_u16(op); } -__aio svint32_t svreinterpret_s32(svuint32_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint32_t op) { return __builtin_sve_reinterpret_s32_u32(op); } -__aio svint32_t svreinterpret_s32(svuint64_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint64_t op) { return __builtin_sve_reinterpret_s32_u64(op); } -__aio svint32_t svreinterpret_s32(svfloat16_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svfloat16_t op) { return __builtin_sve_reinterpret_s32_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svint32_t svreinterpret_s32(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svbfloat16_t op) { return __builtin_sve_reinterpret_s32_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svint32_t svreinterpret_s32(svfloat32_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svfloat32_t op) { return __builtin_sve_reinterpret_s32_f32(op); } -__aio svint32_t svreinterpret_s32(svfloat64_t op) { +__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svfloat64_t op) { return __builtin_sve_reinterpret_s32_f64(op); } -__aio svint64_t svreinterpret_s64(svint8_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint8_t op) { return __builtin_sve_reinterpret_s64_s8(op); } -__aio svint64_t svreinterpret_s64(svint16_t op) { +__aio __attribute__((target("sve"))) svint64_t 
svreinterpret_s64(svint16_t op) { return __builtin_sve_reinterpret_s64_s16(op); } -__aio svint64_t svreinterpret_s64(svint32_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint32_t op) { return __builtin_sve_reinterpret_s64_s32(op); } -__aio svint64_t svreinterpret_s64(svint64_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint64_t op) { return __builtin_sve_reinterpret_s64_s64(op); } -__aio svint64_t svreinterpret_s64(svuint8_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint8_t op) { return __builtin_sve_reinterpret_s64_u8(op); } -__aio svint64_t svreinterpret_s64(svuint16_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint16_t op) { return __builtin_sve_reinterpret_s64_u16(op); } -__aio svint64_t svreinterpret_s64(svuint32_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint32_t op) { return __builtin_sve_reinterpret_s64_u32(op); } -__aio svint64_t svreinterpret_s64(svuint64_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint64_t op) { return __builtin_sve_reinterpret_s64_u64(op); } -__aio svint64_t svreinterpret_s64(svfloat16_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svfloat16_t op) { return __builtin_sve_reinterpret_s64_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svint64_t svreinterpret_s64(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svbfloat16_t op) { return __builtin_sve_reinterpret_s64_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svint64_t svreinterpret_s64(svfloat32_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svfloat32_t op) { return __builtin_sve_reinterpret_s64_f32(op); } -__aio svint64_t svreinterpret_s64(svfloat64_t op) { +__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svfloat64_t op) { return __builtin_sve_reinterpret_s64_f64(op); } -__aio svuint8_t svreinterpret_u8(svint8_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint8_t op) { return __builtin_sve_reinterpret_u8_s8(op); } -__aio svuint8_t svreinterpret_u8(svint16_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint16_t op) { return __builtin_sve_reinterpret_u8_s16(op); } -__aio svuint8_t svreinterpret_u8(svint32_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint32_t op) { return __builtin_sve_reinterpret_u8_s32(op); } -__aio svuint8_t svreinterpret_u8(svint64_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint64_t op) { return __builtin_sve_reinterpret_u8_s64(op); } -__aio svuint8_t svreinterpret_u8(svuint8_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint8_t op) { return __builtin_sve_reinterpret_u8_u8(op); } -__aio svuint8_t svreinterpret_u8(svuint16_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint16_t op) { return __builtin_sve_reinterpret_u8_u16(op); } -__aio svuint8_t svreinterpret_u8(svuint32_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint32_t op) { return __builtin_sve_reinterpret_u8_u32(op); } -__aio svuint8_t svreinterpret_u8(svuint64_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint64_t op) { return __builtin_sve_reinterpret_u8_u64(op); } -__aio svuint8_t svreinterpret_u8(svfloat16_t op) { +__aio __attribute__((target("sve"))) svuint8_t 
svreinterpret_u8(svfloat16_t op) { return __builtin_sve_reinterpret_u8_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svuint8_t svreinterpret_u8(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svbfloat16_t op) { return __builtin_sve_reinterpret_u8_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svuint8_t svreinterpret_u8(svfloat32_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svfloat32_t op) { return __builtin_sve_reinterpret_u8_f32(op); } -__aio svuint8_t svreinterpret_u8(svfloat64_t op) { +__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svfloat64_t op) { return __builtin_sve_reinterpret_u8_f64(op); } -__aio svuint16_t svreinterpret_u16(svint8_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint8_t op) { return __builtin_sve_reinterpret_u16_s8(op); } -__aio svuint16_t svreinterpret_u16(svint16_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint16_t op) { return __builtin_sve_reinterpret_u16_s16(op); } -__aio svuint16_t svreinterpret_u16(svint32_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint32_t op) { return __builtin_sve_reinterpret_u16_s32(op); } -__aio svuint16_t svreinterpret_u16(svint64_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint64_t op) { return __builtin_sve_reinterpret_u16_s64(op); } -__aio svuint16_t svreinterpret_u16(svuint8_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint8_t op) { return __builtin_sve_reinterpret_u16_u8(op); } -__aio svuint16_t svreinterpret_u16(svuint16_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint16_t op) { return __builtin_sve_reinterpret_u16_u16(op); } -__aio svuint16_t svreinterpret_u16(svuint32_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint32_t op) { return __builtin_sve_reinterpret_u16_u32(op); } -__aio svuint16_t svreinterpret_u16(svuint64_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint64_t op) { return __builtin_sve_reinterpret_u16_u64(op); } -__aio svuint16_t svreinterpret_u16(svfloat16_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svfloat16_t op) { return __builtin_sve_reinterpret_u16_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svuint16_t svreinterpret_u16(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svbfloat16_t op) { return __builtin_sve_reinterpret_u16_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svuint16_t svreinterpret_u16(svfloat32_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svfloat32_t op) { return __builtin_sve_reinterpret_u16_f32(op); } -__aio svuint16_t svreinterpret_u16(svfloat64_t op) { +__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svfloat64_t op) { return __builtin_sve_reinterpret_u16_f64(op); } -__aio svuint32_t svreinterpret_u32(svint8_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint8_t op) { return __builtin_sve_reinterpret_u32_s8(op); } -__aio svuint32_t svreinterpret_u32(svint16_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint16_t op) { return __builtin_sve_reinterpret_u32_s16(op); } -__aio svuint32_t svreinterpret_u32(svint32_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint32_t op) { return 
__builtin_sve_reinterpret_u32_s32(op); } -__aio svuint32_t svreinterpret_u32(svint64_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint64_t op) { return __builtin_sve_reinterpret_u32_s64(op); } -__aio svuint32_t svreinterpret_u32(svuint8_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint8_t op) { return __builtin_sve_reinterpret_u32_u8(op); } -__aio svuint32_t svreinterpret_u32(svuint16_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint16_t op) { return __builtin_sve_reinterpret_u32_u16(op); } -__aio svuint32_t svreinterpret_u32(svuint32_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint32_t op) { return __builtin_sve_reinterpret_u32_u32(op); } -__aio svuint32_t svreinterpret_u32(svuint64_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint64_t op) { return __builtin_sve_reinterpret_u32_u64(op); } -__aio svuint32_t svreinterpret_u32(svfloat16_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svfloat16_t op) { return __builtin_sve_reinterpret_u32_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svuint32_t svreinterpret_u32(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svbfloat16_t op) { return __builtin_sve_reinterpret_u32_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svuint32_t svreinterpret_u32(svfloat32_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svfloat32_t op) { return __builtin_sve_reinterpret_u32_f32(op); } -__aio svuint32_t svreinterpret_u32(svfloat64_t op) { +__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svfloat64_t op) { return __builtin_sve_reinterpret_u32_f64(op); } -__aio svuint64_t svreinterpret_u64(svint8_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint8_t op) { return __builtin_sve_reinterpret_u64_s8(op); } -__aio svuint64_t svreinterpret_u64(svint16_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint16_t op) { return __builtin_sve_reinterpret_u64_s16(op); } -__aio svuint64_t svreinterpret_u64(svint32_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint32_t op) { return __builtin_sve_reinterpret_u64_s32(op); } -__aio svuint64_t svreinterpret_u64(svint64_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint64_t op) { return __builtin_sve_reinterpret_u64_s64(op); } -__aio svuint64_t svreinterpret_u64(svuint8_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint8_t op) { return __builtin_sve_reinterpret_u64_u8(op); } -__aio svuint64_t svreinterpret_u64(svuint16_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint16_t op) { return __builtin_sve_reinterpret_u64_u16(op); } -__aio svuint64_t svreinterpret_u64(svuint32_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint32_t op) { return __builtin_sve_reinterpret_u64_u32(op); } -__aio svuint64_t svreinterpret_u64(svuint64_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint64_t op) { return __builtin_sve_reinterpret_u64_u64(op); } -__aio svuint64_t svreinterpret_u64(svfloat16_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svfloat16_t op) { return __builtin_sve_reinterpret_u64_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svuint64_t svreinterpret_u64(svbfloat16_t op) { +__aio 
__attribute__((target("sve"))) svuint64_t svreinterpret_u64(svbfloat16_t op) { return __builtin_sve_reinterpret_u64_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svuint64_t svreinterpret_u64(svfloat32_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svfloat32_t op) { return __builtin_sve_reinterpret_u64_f32(op); } -__aio svuint64_t svreinterpret_u64(svfloat64_t op) { +__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svfloat64_t op) { return __builtin_sve_reinterpret_u64_f64(op); } -__aio svfloat16_t svreinterpret_f16(svint8_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint8_t op) { return __builtin_sve_reinterpret_f16_s8(op); } -__aio svfloat16_t svreinterpret_f16(svint16_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint16_t op) { return __builtin_sve_reinterpret_f16_s16(op); } -__aio svfloat16_t svreinterpret_f16(svint32_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint32_t op) { return __builtin_sve_reinterpret_f16_s32(op); } -__aio svfloat16_t svreinterpret_f16(svint64_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint64_t op) { return __builtin_sve_reinterpret_f16_s64(op); } -__aio svfloat16_t svreinterpret_f16(svuint8_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint8_t op) { return __builtin_sve_reinterpret_f16_u8(op); } -__aio svfloat16_t svreinterpret_f16(svuint16_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint16_t op) { return __builtin_sve_reinterpret_f16_u16(op); } -__aio svfloat16_t svreinterpret_f16(svuint32_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint32_t op) { return __builtin_sve_reinterpret_f16_u32(op); } -__aio svfloat16_t svreinterpret_f16(svuint64_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint64_t op) { return __builtin_sve_reinterpret_f16_u64(op); } -__aio svfloat16_t svreinterpret_f16(svfloat16_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svfloat16_t op) { return __builtin_sve_reinterpret_f16_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svfloat16_t svreinterpret_f16(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svbfloat16_t op) { return __builtin_sve_reinterpret_f16_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svfloat16_t svreinterpret_f16(svfloat32_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svfloat32_t op) { return __builtin_sve_reinterpret_f16_f32(op); } -__aio svfloat16_t svreinterpret_f16(svfloat64_t op) { +__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svfloat64_t op) { return __builtin_sve_reinterpret_f16_f64(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svint8_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint8_t op) { return __builtin_sve_reinterpret_bf16_s8(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svint16_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint16_t op) { return __builtin_sve_reinterpret_bf16_s16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svint32_t op) { +__aio 
__attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint32_t op) { return __builtin_sve_reinterpret_bf16_s32(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svint64_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint64_t op) { return __builtin_sve_reinterpret_bf16_s64(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svuint8_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint8_t op) { return __builtin_sve_reinterpret_bf16_u8(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svuint16_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint16_t op) { return __builtin_sve_reinterpret_bf16_u16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svuint32_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint32_t op) { return __builtin_sve_reinterpret_bf16_u32(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svuint64_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint64_t op) { return __builtin_sve_reinterpret_bf16_u64(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svfloat16_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svfloat16_t op) { return __builtin_sve_reinterpret_bf16_f16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svbfloat16_t op) { return __builtin_sve_reinterpret_bf16_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svfloat32_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svfloat32_t op) { return __builtin_sve_reinterpret_bf16_f32(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svbfloat16_t svreinterpret_bf16(svfloat64_t op) { +__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svfloat64_t op) { return __builtin_sve_reinterpret_bf16_f64(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svfloat32_t svreinterpret_f32(svint8_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint8_t op) { return __builtin_sve_reinterpret_f32_s8(op); } -__aio svfloat32_t svreinterpret_f32(svint16_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint16_t op) { return __builtin_sve_reinterpret_f32_s16(op); } -__aio svfloat32_t svreinterpret_f32(svint32_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint32_t op) { return __builtin_sve_reinterpret_f32_s32(op); } -__aio svfloat32_t svreinterpret_f32(svint64_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint64_t op) { return __builtin_sve_reinterpret_f32_s64(op); } -__aio svfloat32_t svreinterpret_f32(svuint8_t op) { +__aio __attribute__((target("sve"))) svfloat32_t 
svreinterpret_f32(svuint8_t op) { return __builtin_sve_reinterpret_f32_u8(op); } -__aio svfloat32_t svreinterpret_f32(svuint16_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint16_t op) { return __builtin_sve_reinterpret_f32_u16(op); } -__aio svfloat32_t svreinterpret_f32(svuint32_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint32_t op) { return __builtin_sve_reinterpret_f32_u32(op); } -__aio svfloat32_t svreinterpret_f32(svuint64_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint64_t op) { return __builtin_sve_reinterpret_f32_u64(op); } -__aio svfloat32_t svreinterpret_f32(svfloat16_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svfloat16_t op) { return __builtin_sve_reinterpret_f32_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svfloat32_t svreinterpret_f32(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svbfloat16_t op) { return __builtin_sve_reinterpret_f32_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svfloat32_t svreinterpret_f32(svfloat32_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svfloat32_t op) { return __builtin_sve_reinterpret_f32_f32(op); } -__aio svfloat32_t svreinterpret_f32(svfloat64_t op) { +__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svfloat64_t op) { return __builtin_sve_reinterpret_f32_f64(op); } -__aio svfloat64_t svreinterpret_f64(svint8_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint8_t op) { return __builtin_sve_reinterpret_f64_s8(op); } -__aio svfloat64_t svreinterpret_f64(svint16_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint16_t op) { return __builtin_sve_reinterpret_f64_s16(op); } -__aio svfloat64_t svreinterpret_f64(svint32_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint32_t op) { return __builtin_sve_reinterpret_f64_s32(op); } -__aio svfloat64_t svreinterpret_f64(svint64_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint64_t op) { return __builtin_sve_reinterpret_f64_s64(op); } -__aio svfloat64_t svreinterpret_f64(svuint8_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint8_t op) { return __builtin_sve_reinterpret_f64_u8(op); } -__aio svfloat64_t svreinterpret_f64(svuint16_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint16_t op) { return __builtin_sve_reinterpret_f64_u16(op); } -__aio svfloat64_t svreinterpret_f64(svuint32_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint32_t op) { return __builtin_sve_reinterpret_f64_u32(op); } -__aio svfloat64_t svreinterpret_f64(svuint64_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint64_t op) { return __builtin_sve_reinterpret_f64_u64(op); } -__aio svfloat64_t svreinterpret_f64(svfloat16_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svfloat16_t op) { return __builtin_sve_reinterpret_f64_f16(op); } -#if defined(__ARM_FEATURE_SVE_BF16) -__aio svfloat64_t svreinterpret_f64(svbfloat16_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svbfloat16_t op) { return __builtin_sve_reinterpret_f64_bf16(op); } -#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */ -__aio svfloat64_t svreinterpret_f64(svfloat32_t op) { +__aio __attribute__((target("sve"))) svfloat64_t 
svreinterpret_f64(svfloat32_t op) { return __builtin_sve_reinterpret_f64_f32(op); } -__aio svfloat64_t svreinterpret_f64(svfloat64_t op) { +__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svfloat64_t op) { return __builtin_sve_reinterpret_f64_f64(op); } @@ -15698,107 +15590,702 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64))) svint64_t svzip2(svint64_t, svint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16))) svint16_t svzip2(svint16_t, svint16_t); - -#if defined (__ARM_FEATURE_SVE2_BITPERM) -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8))) -svuint8_t svbdep_n_u8(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32))) -svuint32_t svbdep_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64))) -svuint64_t svbdep_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16))) -svuint16_t svbdep_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8))) -svuint8_t svbdep_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32))) -svuint32_t svbdep_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64))) -svuint64_t svbdep_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16))) -svuint16_t svbdep_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8))) -svuint8_t svbext_n_u8(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32))) -svuint32_t svbext_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64))) -svuint64_t svbext_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16))) -svuint16_t svbext_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8))) -svuint8_t svbext_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32))) -svuint32_t svbext_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64))) -svuint64_t svbext_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16))) -svuint16_t svbext_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8))) -svuint8_t svbgrp_n_u8(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32))) -svuint32_t svbgrp_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64))) -svuint64_t svbgrp_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16))) -svuint16_t svbgrp_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8))) -svuint8_t svbgrp_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32))) -svuint32_t svbgrp_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64))) -svuint64_t svbgrp_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16))) -svuint16_t svbgrp_u16(svuint16_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8))) -svuint8_t svbdep(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32))) -svuint32_t svbdep(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64))) -svuint64_t svbdep(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16))) -svuint16_t svbdep(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8))) -svuint8_t svbdep(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32))) -svuint32_t svbdep(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64))) -svuint64_t svbdep(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16))) -svuint16_t svbdep(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8))) -svuint8_t svbext(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32))) -svuint32_t svbext(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64))) -svuint64_t svbext(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16))) -svuint16_t svbext(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8))) -svuint8_t svbext(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32))) -svuint32_t svbext(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64))) -svuint64_t svbext(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16))) -svuint16_t svbext(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8))) -svuint8_t svbgrp(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32))) -svuint32_t svbgrp(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64))) -svuint64_t svbgrp(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16))) -svuint16_t svbgrp(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8))) -svuint8_t svbgrp(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32))) -svuint32_t svbgrp(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64))) -svuint64_t svbgrp(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16))) -svuint16_t svbgrp(svuint16_t, svuint16_t); -#endif //defined (__ARM_FEATURE_SVE2_BITPERM) - -#if defined(__ARM_FEATURE_SVE2) +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) +svfloat32_t svbfdot_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) +svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) +svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) +svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) +svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) +svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) +svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) +svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) +svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) +svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) +bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) +svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) +bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) +svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) +svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) +svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) +svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) +svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) +svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) +svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) +svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) +svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) +svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) +svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) +svbfloat16_t svdup_n_bf16(bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) +svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) +svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) +svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) +svbfloat16_t 
svdup_lane_bf16(svbfloat16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) +svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) +svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) +svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) +svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) +svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) +svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) +svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) +bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) +bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) +svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) +svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) +svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) +svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) +svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) +svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) +svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) +svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) +svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) +svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) +svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) +svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) +svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) +svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) +svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) +uint64_t svlen_bf16(svbfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) +svbfloat16_t svrev_bf16(svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) +svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) +svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) +svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) +svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) +svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) +void svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) +void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) +void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) +void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) +void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) +void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) +void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) +void svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) +void svstnt1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) +void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) +svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) +svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) +svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16))) +svbfloat16x2_t svundef2_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16))) +svbfloat16x3_t svundef3_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16))) +svbfloat16x4_t svundef4_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16))) +svbfloat16_t svundef_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) +svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) +svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) +svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) +svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) +svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) +svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) +svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) +svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) +svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) +svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) +svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) +svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) +svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) +svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) +bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) +svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) +bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) +svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) +svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) +svuint16_t svcnt_x(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) +svuint16_t svcnt_z(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) +svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) +svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) +svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) +svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) +svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) +svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) +svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, 
svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) +svbfloat16_t svdup_bf16(bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) +svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) +svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) +svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) +svbfloat16_t svdup_lane(svbfloat16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) +svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) +svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) +svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) +svbfloat16_t svget2(svbfloat16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) +svbfloat16_t svget3(svbfloat16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) +svbfloat16_t svget4(svbfloat16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) +svbfloat16_t svinsr(svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) +bfloat16_t svlasta(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) +bfloat16_t svlastb(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) +svbfloat16_t svld1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) +svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) +svbfloat16_t svld1rq(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) +svbfloat16x2_t svld2(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) +svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) +svbfloat16x3_t svld3(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) +svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) +svbfloat16x4_t svld4(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) +svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) +svbfloat16_t svldff1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) +svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) +svbfloat16_t svldnf1(svbool_t, bfloat16_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) +svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) +svbfloat16_t svldnt1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) +svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) +uint64_t svlen(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) +svbfloat16_t svrev(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) +svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) +svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) +svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) +svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) +svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) +void svst1(svbool_t, bfloat16_t *, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) +void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) +void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) +void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) +void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) +void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) +void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) +void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) +void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) +void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) +svbfloat16_t svtbl(svbfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) +svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) +svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) +svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) +svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) +svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) +svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) +svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) +svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) +svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) +svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) +svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) +svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) +svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) +svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) +svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) +svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) +svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) +svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) +svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) +svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) +svuint8_t svld1ro_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) +svuint32_t svld1ro_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) +svuint64_t svld1ro_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) +svuint16_t svld1ro_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) +svint8_t svld1ro_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) +svfloat64_t svld1ro_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) +svfloat32_t svld1ro_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) +svfloat16_t svld1ro_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) +svint32_t svld1ro_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) +svint64_t svld1ro_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) +svint16_t svld1ro_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) +svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) +svuint8_t svtrn1q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) +svuint32_t svtrn1q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) +svuint64_t svtrn1q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) +svuint16_t svtrn1q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) +svint8_t svtrn1q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) +svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) +svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) +svfloat16_t svtrn1q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) +svint32_t svtrn1q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) +svint64_t svtrn1q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) +svint16_t svtrn1q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) +svuint8_t svtrn2q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) +svuint32_t svtrn2q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) +svuint64_t svtrn2q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) +svuint16_t svtrn2q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) +svint8_t svtrn2q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) +svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) +svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) +svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) +svint32_t svtrn2q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) +svint64_t svtrn2q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) +svint16_t svtrn2q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) +svuint8_t svuzp1q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) +svuint32_t svuzp1q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) +svuint64_t svuzp1q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) +svuint16_t svuzp1q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) +svint8_t svuzp1q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) +svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) +svfloat32_t 
svuzp1q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) +svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) +svint32_t svuzp1q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) +svint64_t svuzp1q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) +svint16_t svuzp1q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) +svuint8_t svuzp2q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) +svuint32_t svuzp2q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) +svuint64_t svuzp2q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) +svuint16_t svuzp2q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) +svint8_t svuzp2q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) +svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) +svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) +svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) +svint32_t svuzp2q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) +svint64_t svuzp2q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) +svint16_t svuzp2q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) +svuint8_t svzip1q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) +svuint32_t svzip1q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) +svuint64_t svzip1q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) +svuint16_t svzip1q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) +svint8_t svzip1q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) +svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) +svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) +svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) +svint32_t svzip1q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) +svint64_t svzip1q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) +svint16_t svzip1q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) +svuint8_t svzip2q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) +svuint32_t svzip2q_u32(svuint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) +svuint64_t svzip2q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) +svuint16_t svzip2q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) +svint8_t svzip2q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) +svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) +svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) +svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) +svint32_t svzip2q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) +svint64_t svzip2q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) +svint16_t svzip2q_s16(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) +svuint8_t svld1ro(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) +svuint32_t svld1ro(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) +svuint64_t svld1ro(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) +svuint16_t svld1ro(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) +svint8_t svld1ro(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) +svfloat64_t svld1ro(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) +svfloat32_t svld1ro(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) +svfloat16_t svld1ro(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) +svint32_t svld1ro(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) +svint64_t svld1ro(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) +svint16_t svld1ro(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) +svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) +svuint8_t svtrn1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) +svuint32_t svtrn1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) +svuint64_t svtrn1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) +svuint16_t svtrn1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) +svint8_t svtrn1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) +svfloat64_t svtrn1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) +svfloat32_t svtrn1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) 
+svfloat16_t svtrn1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) +svint32_t svtrn1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) +svint64_t svtrn1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) +svint16_t svtrn1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) +svuint8_t svtrn2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) +svuint32_t svtrn2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) +svuint64_t svtrn2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) +svuint16_t svtrn2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) +svint8_t svtrn2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) +svfloat64_t svtrn2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) +svfloat32_t svtrn2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) +svfloat16_t svtrn2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) +svint32_t svtrn2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) +svint64_t svtrn2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) +svint16_t svtrn2q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) +svuint8_t svuzp1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) +svuint32_t svuzp1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) +svuint64_t svuzp1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) +svuint16_t svuzp1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) +svint8_t svuzp1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) +svfloat64_t svuzp1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) +svfloat32_t svuzp1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) +svfloat16_t svuzp1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) +svint32_t svuzp1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) +svint64_t svuzp1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) +svint16_t svuzp1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) +svuint8_t svuzp2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) +svuint32_t svuzp2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) +svuint64_t svuzp2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) +svuint16_t svuzp2q(svuint16_t, svuint16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) +svint8_t svuzp2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) +svfloat64_t svuzp2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) +svfloat32_t svuzp2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) +svfloat16_t svuzp2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) +svint32_t svuzp2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) +svint64_t svuzp2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) +svint16_t svuzp2q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) +svuint8_t svzip1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) +svuint32_t svzip1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) +svuint64_t svzip1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) +svuint16_t svzip1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) +svint8_t svzip1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) +svfloat64_t svzip1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) +svfloat32_t svzip1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) +svfloat16_t svzip1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) +svint32_t svzip1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) +svint64_t svzip1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) +svint16_t svzip1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) +svuint8_t svzip2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) +svuint32_t svzip2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) +svuint64_t svzip2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) +svuint16_t svzip2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) +svint8_t svzip2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) +svfloat64_t svzip2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) +svfloat32_t svzip2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) +svfloat16_t svzip2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) +svint32_t svzip2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) +svint64_t svzip2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) +svint16_t svzip2q(svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) +svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) +svbfloat16_t svld1ro(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) +svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) +svuint32_t svmmla_u32(svuint32_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) +svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) +svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) +svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) +svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) +svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) +svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) +svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) +svint32_t svmmla(svint32_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) +svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) +svint32_t svsudot(svint32_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) +svint32_t svsudot(svint32_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) +svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) +svint32_t svusdot(svint32_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) +svint32_t svusdot(svint32_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) +svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) +svint32_t svusmmla(svint32_t, svuint8_t, svint8_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) svint8_t svaba_n_s8(svint8_t, svint8_t, int8_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) @@ -23227,31 +23714,22 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64))) svint64_t svxar(svint64_t, svint64_t, uint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16))) svint16_t svxar(svint16_t, svint16_t, uint64_t); -#endif //defined(__ARM_FEATURE_SVE2) - -#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC) -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16))) -svbool_t svwhilerw_bf16(bfloat16_t const *, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16))) -svbool_t svwhilewr_bf16(bfloat16_t const 
*, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16))) -svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16))) -svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *); -#endif //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC) - -#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16) __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16))) svbfloat16_t svtbl2_bf16(svbfloat16x2_t, svuint16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16))) svbfloat16_t svtbx_bf16(svbfloat16_t, svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16))) +svbool_t svwhilerw_bf16(bfloat16_t const *, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16))) +svbool_t svwhilewr_bf16(bfloat16_t const *, bfloat16_t const *); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16))) svbfloat16_t svtbl2(svbfloat16x2_t, svuint16_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16))) svbfloat16_t svtbx(svbfloat16_t, svbfloat16_t, svuint16_t); -#endif //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16) - -#if defined(__ARM_FEATURE_SVE2_AES) +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16))) +svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16))) +svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8))) svuint8_t svaesd_u8(svuint8_t, svuint8_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8))) @@ -23284,9 +23762,102 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64 svuint64_t svpmullt_pair(svuint64_t, uint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64))) svuint64_t svpmullt_pair(svuint64_t, svuint64_t); -#endif //defined(__ARM_FEATURE_SVE2_AES) - -#if defined(__ARM_FEATURE_SVE2_SHA3) +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8))) +svuint8_t svbdep_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32))) +svuint32_t svbdep_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64))) +svuint64_t svbdep_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16))) +svuint16_t svbdep_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8))) +svuint8_t svbdep_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32))) +svuint32_t svbdep_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64))) +svuint64_t svbdep_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16))) +svuint16_t svbdep_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8))) +svuint8_t svbext_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32))) +svuint32_t svbext_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64))) +svuint64_t 
svbext_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16))) +svuint16_t svbext_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8))) +svuint8_t svbext_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32))) +svuint32_t svbext_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64))) +svuint64_t svbext_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16))) +svuint16_t svbext_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8))) +svuint8_t svbgrp_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32))) +svuint32_t svbgrp_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64))) +svuint64_t svbgrp_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16))) +svuint16_t svbgrp_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8))) +svuint8_t svbgrp_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32))) +svuint32_t svbgrp_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64))) +svuint64_t svbgrp_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16))) +svuint16_t svbgrp_u16(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8))) +svuint8_t svbdep(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32))) +svuint32_t svbdep(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64))) +svuint64_t svbdep(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16))) +svuint16_t svbdep(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8))) +svuint8_t svbdep(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32))) +svuint32_t svbdep(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64))) +svuint64_t svbdep(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16))) +svuint16_t svbdep(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8))) +svuint8_t svbext(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32))) +svuint32_t svbext(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64))) +svuint64_t svbext(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16))) +svuint16_t svbext(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8))) +svuint8_t svbext(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32))) +svuint32_t svbext(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64))) +svuint64_t svbext(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16))) +svuint16_t svbext(svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8))) +svuint8_t svbgrp(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32))) +svuint32_t svbgrp(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64))) +svuint64_t svbgrp(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16))) +svuint16_t svbgrp(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8))) +svuint8_t svbgrp(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32))) +svuint32_t svbgrp(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64))) +svuint64_t svbgrp(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16))) +svuint16_t svbgrp(svuint16_t, svuint16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64))) svuint64_t svrax1_u64(svuint64_t, svuint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64))) @@ -23295,9 +23866,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64))) svuint64_t svrax1(svuint64_t, svuint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64))) svint64_t svrax1(svint64_t, svint64_t); -#endif //defined(__ARM_FEATURE_SVE2_SHA3) - -#if defined(__ARM_FEATURE_SVE2_SM4) __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32))) svuint32_t svsm4e_u32(svuint32_t, svuint32_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32))) @@ -23306,724 +23874,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32))) svuint32_t svsm4e(svuint32_t, svuint32_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32))) svuint32_t svsm4ekey(svuint32_t, svuint32_t); -#endif //defined(__ARM_FEATURE_SVE2_SM4) - -#if defined(__ARM_FEATURE_SVE_BF16) -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) -svfloat32_t svbfdot_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) -svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) -svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) -svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) -svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) -svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) -svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) -svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) -svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) -svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) -bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) -svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) -bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) -svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) -svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) -svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) -svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) -svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) -svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) -svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) -svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) -svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) -svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) -svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) -svbfloat16_t svdup_n_bf16(bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) -svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) -svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) -svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) -svbfloat16_t svdup_lane_bf16(svbfloat16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) -svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) -svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) -svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) -svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) -svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) -svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) -svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) -bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) -bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) -svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) -svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) -svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) -svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) -svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) -svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) -svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) -svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) -svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) -svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) -svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) -svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) -svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) -svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) -svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) -uint64_t svlen_bf16(svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) -svbfloat16_t svrev_bf16(svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) -svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) -svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) -svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) -svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) -svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) -void 
svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) -void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) -void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) -void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) -void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) -void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) -void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) -void svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) -void svstnt1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) -void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) -svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) -svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) -svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16))) -svbfloat16x2_t svundef2_bf16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16))) -svbfloat16x3_t svundef3_bf16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16))) -svbfloat16x4_t svundef4_bf16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16))) -svbfloat16_t svundef_bf16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) -svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) -svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) -svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) -svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) -svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) -svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) -svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) -svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) -svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) -svfloat32_t 
svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) -svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) -svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) -svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) -svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) -bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) -svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) -bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) -svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) -svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) -svuint16_t svcnt_x(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) -svuint16_t svcnt_z(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) -svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) -svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) -svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) -svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) -svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) -svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) -svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) -svbfloat16_t svdup_bf16(bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) -svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) -svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) -svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) -svbfloat16_t svdup_lane(svbfloat16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) -svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) 
-svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) -svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) -svbfloat16_t svget2(svbfloat16x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) -svbfloat16_t svget3(svbfloat16x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) -svbfloat16_t svget4(svbfloat16x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) -svbfloat16_t svinsr(svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) -bfloat16_t svlasta(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) -bfloat16_t svlastb(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) -svbfloat16_t svld1(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) -svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) -svbfloat16_t svld1rq(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) -svbfloat16x2_t svld2(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) -svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) -svbfloat16x3_t svld3(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) -svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) -svbfloat16x4_t svld4(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) -svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) -svbfloat16_t svldff1(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) -svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) -svbfloat16_t svldnf1(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) -svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) -svbfloat16_t svldnt1(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) -svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) -uint64_t svlen(svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) -svbfloat16_t svrev(svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) -svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) -svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) -svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) -svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) -svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) -void svst1(svbool_t, bfloat16_t *, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) -void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) -void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) -void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) -void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) -void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) -void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) -void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) -void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) -void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) -svbfloat16_t svtbl(svbfloat16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) -svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) -svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) -svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) -svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) -svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) -svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t); -#endif //defined(__ARM_FEATURE_SVE_BF16) - -#if defined(__ARM_FEATURE_SVE_MATMUL_FP32) -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) -svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) -svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t); -#endif //defined(__ARM_FEATURE_SVE_MATMUL_FP32) - -#if defined(__ARM_FEATURE_SVE_MATMUL_FP64) -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) -svuint8_t svld1ro_u8(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) -svuint32_t svld1ro_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) -svuint64_t svld1ro_u64(svbool_t, uint64_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) -svuint16_t svld1ro_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) -svint8_t svld1ro_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) -svfloat64_t svld1ro_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) -svfloat32_t svld1ro_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) -svfloat16_t svld1ro_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) -svint32_t svld1ro_s32(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) -svint64_t svld1ro_s64(svbool_t, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) -svint16_t svld1ro_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) -svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) -svuint8_t svtrn1q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) -svuint32_t svtrn1q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) -svuint64_t svtrn1q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) -svuint16_t svtrn1q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) -svint8_t svtrn1q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) -svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) -svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) -svfloat16_t svtrn1q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) -svint32_t svtrn1q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) -svint64_t svtrn1q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) -svint16_t svtrn1q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) -svuint8_t svtrn2q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) -svuint32_t svtrn2q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) -svuint64_t svtrn2q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) -svuint16_t svtrn2q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) -svint8_t svtrn2q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) -svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) -svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) -svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) -svint32_t svtrn2q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) -svint64_t svtrn2q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) -svint16_t svtrn2q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) -svuint8_t svuzp1q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) -svuint32_t svuzp1q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) -svuint64_t svuzp1q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) -svuint16_t svuzp1q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) -svint8_t svuzp1q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) -svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) -svfloat32_t svuzp1q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) -svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) -svint32_t svuzp1q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) -svint64_t svuzp1q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) -svint16_t svuzp1q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) -svuint8_t svuzp2q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) -svuint32_t svuzp2q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) -svuint64_t svuzp2q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) -svuint16_t svuzp2q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) -svint8_t svuzp2q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) -svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) -svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) -svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) -svint32_t svuzp2q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) -svint64_t svuzp2q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) -svint16_t svuzp2q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) -svuint8_t svzip1q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) -svuint32_t svzip1q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) -svuint64_t svzip1q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) -svuint16_t 
svzip1q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) -svint8_t svzip1q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) -svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) -svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) -svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) -svint32_t svzip1q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) -svint64_t svzip1q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) -svint16_t svzip1q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) -svuint8_t svzip2q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) -svuint32_t svzip2q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) -svuint64_t svzip2q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) -svuint16_t svzip2q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) -svint8_t svzip2q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) -svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) -svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) -svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) -svint32_t svzip2q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) -svint64_t svzip2q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) -svint16_t svzip2q_s16(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) -svuint8_t svld1ro(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) -svuint32_t svld1ro(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) -svuint64_t svld1ro(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) -svuint16_t svld1ro(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) -svint8_t svld1ro(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) -svfloat64_t svld1ro(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) -svfloat32_t svld1ro(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) -svfloat16_t svld1ro(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) -svint32_t svld1ro(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) -svint64_t svld1ro(svbool_t, int64_t const *); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) -svint16_t svld1ro(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) -svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) -svuint8_t svtrn1q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) -svuint32_t svtrn1q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) -svuint64_t svtrn1q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) -svuint16_t svtrn1q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) -svint8_t svtrn1q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) -svfloat64_t svtrn1q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) -svfloat32_t svtrn1q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) -svfloat16_t svtrn1q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) -svint32_t svtrn1q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) -svint64_t svtrn1q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) -svint16_t svtrn1q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) -svuint8_t svtrn2q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) -svuint32_t svtrn2q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) -svuint64_t svtrn2q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) -svuint16_t svtrn2q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) -svint8_t svtrn2q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) -svfloat64_t svtrn2q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) -svfloat32_t svtrn2q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) -svfloat16_t svtrn2q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) -svint32_t svtrn2q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) -svint64_t svtrn2q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) -svint16_t svtrn2q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) -svuint8_t svuzp1q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) -svuint32_t svuzp1q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) -svuint64_t svuzp1q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) -svuint16_t svuzp1q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) -svint8_t svuzp1q(svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) -svfloat64_t svuzp1q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) -svfloat32_t svuzp1q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) -svfloat16_t svuzp1q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) -svint32_t svuzp1q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) -svint64_t svuzp1q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) -svint16_t svuzp1q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) -svuint8_t svuzp2q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) -svuint32_t svuzp2q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) -svuint64_t svuzp2q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) -svuint16_t svuzp2q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) -svint8_t svuzp2q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) -svfloat64_t svuzp2q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) -svfloat32_t svuzp2q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) -svfloat16_t svuzp2q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) -svint32_t svuzp2q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) -svint64_t svuzp2q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) -svint16_t svuzp2q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) -svuint8_t svzip1q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) -svuint32_t svzip1q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) -svuint64_t svzip1q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) -svuint16_t svzip1q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) -svint8_t svzip1q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) -svfloat64_t svzip1q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) -svfloat32_t svzip1q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) -svfloat16_t svzip1q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) -svint32_t svzip1q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) -svint64_t svzip1q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) -svint16_t svzip1q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) -svuint8_t svzip2q(svuint8_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) -svuint32_t svzip2q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) -svuint64_t svzip2q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) -svuint16_t svzip2q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) -svint8_t svzip2q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) -svfloat64_t svzip2q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) -svfloat32_t svzip2q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) -svfloat16_t svzip2q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) -svint32_t svzip2q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) -svint64_t svzip2q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) -svint16_t svzip2q(svint16_t, svint16_t); -#endif //defined(__ARM_FEATURE_SVE_MATMUL_FP64) - -#if defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16) -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) -svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) -svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) -svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) -svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) -svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) -svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) -svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) -svbfloat16_t svld1ro(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) -svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) -svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) -svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) -svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) -svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) -svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t); -#endif //defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16) - -#if defined(__ARM_FEATURE_SVE_MATMUL_INT8) -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) -svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) -svuint32_t svmmla_u32(svuint32_t, svuint8_t, 
svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) -svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) -svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) -svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) -svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) -svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) -svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) -svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) -svint32_t svmmla(svint32_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) -svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) -svint32_t svsudot(svint32_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) -svint32_t svsudot(svint32_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) -svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) -svint32_t svusdot(svint32_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) -svint32_t svusdot(svint32_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) -svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) -svint32_t svusmmla(svint32_t, svuint8_t, svint8_t); -#endif //defined(__ARM_FEATURE_SVE_MATMUL_INT8) -#if defined(__ARM_FEATURE_SVE_BF16) #define svcvtnt_bf16_x svcvtnt_bf16_m #define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m -#endif /*__ARM_FEATURE_SVE_BF16 */ - -#if defined(__ARM_FEATURE_SVE2) #define svcvtnt_f16_x svcvtnt_f16_m #define svcvtnt_f16_f32_x svcvtnt_f16_f32_m #define svcvtnt_f32_x svcvtnt_f32_m @@ -24032,8 +23884,6 @@ svint32_t svusmmla(svint32_t, svuint8_t, svint8_t); #define svcvtxnt_f32_x svcvtxnt_f32_m #define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m -#endif /*__ARM_FEATURE_SVE2 */ - #ifdef __cplusplus } // extern "C" #endif @@ -24042,6 +23892,4 @@ svint32_t svusmmla(svint32_t, svuint8_t, svint8_t); #undef __aio -#endif /*__ARM_FEATURE_SVE */ - #endif /* __ARM_SVE_H */ diff --git a/lib/include/avx512bf16intrin.h b/lib/include/avx512bf16intrin.h index 09653738d4..a864c1e335 100644 --- a/lib/include/avx512bf16intrin.h +++ b/lib/include/avx512bf16intrin.h @@ -10,12 +10,14 @@ #error "Never use directly; include instead." 
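The removals above drop the feature-macro-guarded declarations of the SVE int8 matrix-multiply intrinsics (svmmla_s32, svusdot_s32, svusmmla_s32 and their overloaded forms) from the pregenerated header; in the LLVM 16 version of arm_sve.h they appear to be exposed without the __ARM_FEATURE_SVE_MATMUL_INT8 preprocessor guard. As a minimal, illustrative sketch of what these declarations provide (not part of the patch; the helper names are invented, and a compiler targeting SVE with the Int8 matrix-multiply extension is assumed):

#include <arm_sve.h>

/* Unsigned-by-signed int8 dot product, accumulating into 32-bit lanes. */
svint32_t dot_step(svint32_t acc, svuint8_t a, svint8_t b) {
  return svusdot_s32(acc, a, b);
}

/* Signed int8 matrix multiply-accumulate within each 128-bit segment. */
svint32_t mmla_step(svint32_t acc, svint8_t a, svint8_t b) {
  return svmmla_s32(acc, a, b);
}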
#endif +#ifdef __SSE2__ + #ifndef __AVX512BF16INTRIN_H #define __AVX512BF16INTRIN_H -typedef short __m512bh __attribute__((__vector_size__(64), __aligned__(64))); -typedef short __m256bh __attribute__((__vector_size__(32), __aligned__(32))); -typedef unsigned short __bfloat16; +typedef __bf16 __v32bf __attribute__((__vector_size__(64), __aligned__(64))); +typedef __bf16 __m512bh __attribute__((__vector_size__(64), __aligned__(64))); +typedef __bf16 __bfloat16 __attribute__((deprecated("use __bf16 instead"))); #define __DEFAULT_FN_ATTRS512 \ __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16"), \ @@ -33,7 +35,7 @@ typedef unsigned short __bfloat16; /// A bfloat data. /// \returns A float data whose sign field and exponent field keep unchanged, /// and fraction field is extended to 23 bits. -static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtsbh_ss(__bfloat16 __A) { +static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtsbh_ss(__bf16 __A) { return __builtin_ia32_cvtsbf162ss_32(__A); } @@ -74,9 +76,9 @@ _mm512_cvtne2ps_pbh(__m512 __A, __m512 __B) { /// conversion of __B, and higher 256 bits come from conversion of __A. static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_mask_cvtne2ps_pbh(__m512bh __W, __mmask32 __U, __m512 __A, __m512 __B) { - return (__m512bh)__builtin_ia32_selectw_512((__mmask32)__U, - (__v32hi)_mm512_cvtne2ps_pbh(__A, __B), - (__v32hi)__W); + return (__m512bh)__builtin_ia32_selectpbf_512((__mmask32)__U, + (__v32bf)_mm512_cvtne2ps_pbh(__A, __B), + (__v32bf)__W); } /// Convert Two Packed Single Data to One Packed BF16 Data. @@ -96,9 +98,9 @@ _mm512_mask_cvtne2ps_pbh(__m512bh __W, __mmask32 __U, __m512 __A, __m512 __B) { /// conversion of __B, and higher 256 bits come from conversion of __A. static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_cvtne2ps_pbh(__mmask32 __U, __m512 __A, __m512 __B) { - return (__m512bh)__builtin_ia32_selectw_512((__mmask32)__U, - (__v32hi)_mm512_cvtne2ps_pbh(__A, __B), - (__v32hi)_mm512_setzero_si512()); + return (__m512bh)__builtin_ia32_selectpbf_512((__mmask32)__U, + (__v32bf)_mm512_cvtne2ps_pbh(__A, __B), + (__v32bf)_mm512_setzero_si512()); } /// Convert Packed Single Data to Packed BF16 Data. @@ -113,7 +115,7 @@ _mm512_maskz_cvtne2ps_pbh(__mmask32 __U, __m512 __A, __m512 __B) { static __inline__ __m256bh __DEFAULT_FN_ATTRS512 _mm512_cvtneps_pbh(__m512 __A) { return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A, - (__v16hi)_mm256_undefined_si256(), + (__v16bf)_mm256_undefined_si256(), (__mmask16)-1); } @@ -134,7 +136,7 @@ _mm512_cvtneps_pbh(__m512 __A) { static __inline__ __m256bh __DEFAULT_FN_ATTRS512 _mm512_mask_cvtneps_pbh(__m256bh __W, __mmask16 __U, __m512 __A) { return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A, - (__v16hi)__W, + (__v16bf)__W, (__mmask16)__U); } @@ -153,7 +155,7 @@ _mm512_mask_cvtneps_pbh(__m256bh __W, __mmask16 __U, __m512 __A) { static __inline__ __m256bh __DEFAULT_FN_ATTRS512 _mm512_maskz_cvtneps_pbh(__mmask16 __U, __m512 __A) { return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A, - (__v16hi)_mm256_setzero_si256(), + (__v16bf)_mm256_setzero_si256(), (__mmask16)__U); } @@ -174,8 +176,8 @@ _mm512_maskz_cvtneps_pbh(__mmask16 __U, __m512 __A) { static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_dpbf16_ps(__m512 __D, __m512bh __A, __m512bh __B) { return (__m512)__builtin_ia32_dpbf16ps_512((__v16sf) __D, - (__v16si) __A, - (__v16si) __B); + (__v32bf) __A, + (__v32bf) __B); } /// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. 
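The avx512bf16intrin.h changes above make __bfloat16 a deprecated alias of __bf16 and move the convert/select builtins to the __v32bf element type, without changing the user-facing conversion and dot-product intrinsics. A minimal usage sketch of those intrinsics (illustrative only, not part of the patch; the helper name is invented, and building with the AVX-512 BF16 feature enabled, e.g. -mavx512bf16 on top of the AVX-512 baseline, is assumed):

#include <immintrin.h>

/* Pack two float vectors into 32 bf16 lanes (first argument lands in the
 * upper half), then accumulate pairwise bf16 dot products into packed
 * single precision. */
__m512 bf16_dot_step(__m512 acc, __m512 a_lo, __m512 a_hi,
                     __m512 b_lo, __m512 b_hi) {
  __m512bh a = _mm512_cvtne2ps_pbh(a_hi, a_lo);
  __m512bh b = _mm512_cvtne2ps_pbh(b_hi, b_lo);
  return _mm512_dpbf16_ps(acc, a, b);
}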
@@ -277,3 +279,4 @@ _mm512_mask_cvtpbh_ps(__m512 __S, __mmask16 __U, __m256bh __A) { #undef __DEFAULT_FN_ATTRS512 #endif +#endif diff --git a/lib/include/avx512fintrin.h b/lib/include/avx512fintrin.h index 61bc89c2b8..b19d2fb90f 100644 --- a/lib/include/avx512fintrin.h +++ b/lib/include/avx512fintrin.h @@ -256,8 +256,8 @@ _mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) static __inline __m512 __DEFAULT_FN_ATTRS512 _mm512_setzero_ps(void) { - return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }; + return __extension__ (__m512){ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }; } #define _mm512_setzero _mm512_setzero_ps diff --git a/lib/include/avx512fp16intrin.h b/lib/include/avx512fp16intrin.h index 99409a31b3..5cdc37fde6 100644 --- a/lib/include/avx512fp16intrin.h +++ b/lib/include/avx512fp16intrin.h @@ -10,6 +10,8 @@ #error "Never use directly; include instead." #endif +#ifdef __SSE2__ + #ifndef __AVX512FP16INTRIN_H #define __AVX512FP16INTRIN_H @@ -17,12 +19,6 @@ typedef _Float16 __v32hf __attribute__((__vector_size__(64), __aligned__(64))); typedef _Float16 __m512h __attribute__((__vector_size__(64), __aligned__(64))); typedef _Float16 __m512h_u __attribute__((__vector_size__(64), __aligned__(1))); -typedef _Float16 __v8hf __attribute__((__vector_size__(16), __aligned__(16))); -typedef _Float16 __m128h __attribute__((__vector_size__(16), __aligned__(16))); -typedef _Float16 __m128h_u __attribute__((__vector_size__(16), __aligned__(1))); -typedef _Float16 __v16hf __attribute__((__vector_size__(32), __aligned__(32))); -typedef _Float16 __m256h __attribute__((__vector_size__(32), __aligned__(32))); -typedef _Float16 __m256h_u __attribute__((__vector_size__(32), __aligned__(1))); /* Define the default attributes for the functions in this file. 
*/ #define __DEFAULT_FN_ATTRS512 \ @@ -829,7 +825,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_load_sh(void const *__dp) { struct __mm_load_sh_struct { _Float16 __u; } __attribute__((__packed__, __may_alias__)); - _Float16 __u = ((struct __mm_load_sh_struct *)__dp)->__u; + _Float16 __u = ((const struct __mm_load_sh_struct *)__dp)->__u; return (__m128h){__u, 0, 0, 0, 0, 0, 0, 0}; } @@ -838,13 +834,13 @@ _mm_mask_load_sh(__m128h __W, __mmask8 __U, const void *__A) { __m128h src = (__v8hf)__builtin_shufflevector( (__v8hf)__W, (__v8hf)_mm_setzero_ph(), 0, 8, 8, 8, 8, 8, 8, 8); - return (__m128h)__builtin_ia32_loadsh128_mask((__v8hf *)__A, src, __U & 1); + return (__m128h)__builtin_ia32_loadsh128_mask((const __v8hf *)__A, src, __U & 1); } static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_load_sh(__mmask8 __U, const void *__A) { return (__m128h)__builtin_ia32_loadsh128_mask( - (__v8hf *)__A, (__v8hf)_mm_setzero_ph(), __U & 1); + (const __v8hf *)__A, (__v8hf)_mm_setzero_ph(), __U & 1); } static __inline__ __m512h __DEFAULT_FN_ATTRS512 @@ -3347,3 +3343,4 @@ _mm512_permutexvar_ph(__m512i __A, __m512h __B) { #undef __DEFAULT_FN_ATTRS512 #endif +#endif diff --git a/lib/include/avx512ifmavlintrin.h b/lib/include/avx512ifmavlintrin.h index 5889401d10..3284ee1820 100644 --- a/lib/include/avx512ifmavlintrin.h +++ b/lib/include/avx512ifmavlintrin.h @@ -18,14 +18,21 @@ #define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl"), __min_vector_width__(128))) #define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl"), __min_vector_width__(256))) +#define _mm_madd52hi_epu64(X, Y, Z) \ + ((__m128i)__builtin_ia32_vpmadd52huq128((__v2di)(X), (__v2di)(Y), \ + (__v2di)(Z))) +#define _mm256_madd52hi_epu64(X, Y, Z) \ + ((__m256i)__builtin_ia32_vpmadd52huq256((__v4di)(X), (__v4di)(Y), \ + (__v4di)(Z))) -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_madd52hi_epu64 (__m128i __X, __m128i __Y, __m128i __Z) -{ - return (__m128i)__builtin_ia32_vpmadd52huq128((__v2di) __X, (__v2di) __Y, - (__v2di) __Z); -} +#define _mm_madd52lo_epu64(X, Y, Z) \ + ((__m128i)__builtin_ia32_vpmadd52luq128((__v2di)(X), (__v2di)(Y), \ + (__v2di)(Z))) + +#define _mm256_madd52lo_epu64(X, Y, Z) \ + ((__m256i)__builtin_ia32_vpmadd52luq256((__v4di)(X), (__v4di)(Y), \ + (__v4di)(Z))) static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) @@ -43,13 +50,6 @@ _mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) (__v2di)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_madd52hi_epu64 (__m256i __X, __m256i __Y, __m256i __Z) -{ - return (__m256i)__builtin_ia32_vpmadd52huq256((__v4di)__X, (__v4di)__Y, - (__v4di)__Z); -} - static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) { @@ -66,13 +66,6 @@ _mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z (__v4di)_mm256_setzero_si256()); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_madd52lo_epu64 (__m128i __X, __m128i __Y, __m128i __Z) -{ - return (__m128i)__builtin_ia32_vpmadd52luq128((__v2di)__X, (__v2di)__Y, - (__v2di)__Z); -} - static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) { @@ -89,13 +82,6 @@ _mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i 
__Z) (__v2di)_mm_setzero_si128()); } -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_madd52lo_epu64 (__m256i __X, __m256i __Y, __m256i __Z) -{ - return (__m256i)__builtin_ia32_vpmadd52luq256((__v4di)__X, (__v4di)__Y, - (__v4di)__Z); -} - static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) { diff --git a/lib/include/avx512vlbf16intrin.h b/lib/include/avx512vlbf16intrin.h index 1cdbb28484..f5b8911fac 100644 --- a/lib/include/avx512vlbf16intrin.h +++ b/lib/include/avx512vlbf16intrin.h @@ -10,11 +10,11 @@ #error "Never use directly; include instead." #endif +#ifdef __SSE2__ + #ifndef __AVX512VLBF16INTRIN_H #define __AVX512VLBF16INTRIN_H -typedef short __m128bh __attribute__((__vector_size__(16), __aligned__(16))); - #define __DEFAULT_FN_ATTRS128 \ __attribute__((__always_inline__, __nodebug__, \ __target__("avx512vl, avx512bf16"), __min_vector_width__(128))) @@ -59,9 +59,9 @@ _mm_cvtne2ps_pbh(__m128 __A, __m128 __B) { /// conversion of __B, and higher 64 bits come from conversion of __A. static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) { - return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U, - (__v8hi)_mm_cvtne2ps_pbh(__A, __B), - (__v8hi)__W); + return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U, + (__v8bf)_mm_cvtne2ps_pbh(__A, __B), + (__v8bf)__W); } /// Convert Two Packed Single Data to One Packed BF16 Data. @@ -81,9 +81,9 @@ _mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) { /// conversion of __B, and higher 64 bits come from conversion of __A. static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_maskz_cvtne2ps_pbh(__mmask8 __U, __m128 __A, __m128 __B) { - return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U, - (__v8hi)_mm_cvtne2ps_pbh(__A, __B), - (__v8hi)_mm_setzero_si128()); + return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U, + (__v8bf)_mm_cvtne2ps_pbh(__A, __B), + (__v8bf)_mm_setzero_si128()); } /// Convert Two Packed Single Data to One Packed BF16 Data. @@ -123,9 +123,9 @@ _mm256_cvtne2ps_pbh(__m256 __A, __m256 __B) { /// conversion of __B, and higher 128 bits come from conversion of __A. static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) { - return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U, - (__v16hi)_mm256_cvtne2ps_pbh(__A, __B), - (__v16hi)__W); + return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U, + (__v16bf)_mm256_cvtne2ps_pbh(__A, __B), + (__v16bf)__W); } /// Convert Two Packed Single Data to One Packed BF16 Data. @@ -145,9 +145,9 @@ _mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) { /// conversion of __B, and higher 128 bits come from conversion of __A. static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) { - return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U, - (__v16hi)_mm256_cvtne2ps_pbh(__A, __B), - (__v16hi)_mm256_setzero_si256()); + return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U, + (__v16bf)_mm256_cvtne2ps_pbh(__A, __B), + (__v16bf)_mm256_setzero_si256()); } /// Convert Packed Single Data to Packed BF16 Data. @@ -160,12 +160,8 @@ _mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) { /// A 128-bit vector of [4 x float]. /// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from /// conversion of __A, and higher 64 bits are 0. 
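The avx512ifmavlintrin.h hunk above replaces the unmasked 52-bit multiply-add wrappers with macros while keeping the masked forms as inline functions; callers are unaffected. A minimal usage sketch (illustrative only, not part of the patch; the helper name is invented, and -mavx512ifma -mavx512vl are assumed as the enabling flags):

#include <immintrin.h>

/* One limb step of a 52-bit multiply-accumulate: the low and high halves of
 * the 104-bit products a*b are added into separate 64-bit accumulators. */
void madd52_step(__m256i *lo_acc, __m256i *hi_acc, __m256i a, __m256i b) {
  *lo_acc = _mm256_madd52lo_epu64(*lo_acc, a, b);
  *hi_acc = _mm256_madd52hi_epu64(*hi_acc, a, b);
}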
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128 -_mm_cvtneps_pbh(__m128 __A) { - return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, - (__v8hi)_mm_undefined_si128(), - (__mmask8)-1); -} +#define _mm_cvtneps_pbh(A) \ + ((__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)(A))) /// Convert Packed Single Data to Packed BF16 Data. /// @@ -185,7 +181,7 @@ _mm_cvtneps_pbh(__m128 __A) { static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) { return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, - (__v8hi)__W, + (__v8bf)__W, (__mmask8)__U); } @@ -205,7 +201,7 @@ _mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) { static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) { return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, - (__v8hi)_mm_setzero_si128(), + (__v8bf)_mm_setzero_si128(), (__mmask8)__U); } @@ -218,12 +214,8 @@ _mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) { /// \param __A /// A 256-bit vector of [8 x float]. /// \returns A 128-bit vector of [8 x bfloat] comes from conversion of __A. -static __inline__ __m128bh __DEFAULT_FN_ATTRS256 -_mm256_cvtneps_pbh(__m256 __A) { - return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A, - (__v8hi)_mm_undefined_si128(), - (__mmask8)-1); -} +#define _mm256_cvtneps_pbh(A) \ + ((__m128bh)__builtin_ia32_vcvtneps2bf16256((__v8sf)(A))) /// Convert Packed Single Data to Packed BF16 Data. /// @@ -242,7 +234,7 @@ _mm256_cvtneps_pbh(__m256 __A) { static __inline__ __m128bh __DEFAULT_FN_ATTRS256 _mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) { return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A, - (__v8hi)__W, + (__v8bf)__W, (__mmask8)__U); } @@ -261,7 +253,7 @@ _mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) { static __inline__ __m128bh __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) { return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A, - (__v8hi)_mm_setzero_si128(), + (__v8bf)_mm_setzero_si128(), (__mmask8)__U); } @@ -282,8 +274,8 @@ _mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) { static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_dpbf16_ps(__m128 __D, __m128bh __A, __m128bh __B) { return (__m128)__builtin_ia32_dpbf16ps_128((__v4sf)__D, - (__v4si)__A, - (__v4si)__B); + (__v8bf)__A, + (__v8bf)__B); } /// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. @@ -351,8 +343,8 @@ _mm_maskz_dpbf16_ps(__mmask8 __U, __m128 __D, __m128bh __A, __m128bh __B) { static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_dpbf16_ps(__m256 __D, __m256bh __A, __m256bh __B) { return (__m256)__builtin_ia32_dpbf16ps_256((__v8sf)__D, - (__v8si)__A, - (__v8si)__B); + (__v16bf)__A, + (__v16bf)__B); } /// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. @@ -413,11 +405,11 @@ _mm256_maskz_dpbf16_ps(__mmask8 __U, __m256 __D, __m256bh __A, __m256bh __B) { /// A float data. /// \returns A bf16 data whose sign field and exponent field keep unchanged, /// and fraction field is truncated to 7 bits. 
-static __inline__ __bfloat16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) { +static __inline__ __bf16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) { __v4sf __V = {__A, 0, 0, 0}; - __v8hi __R = __builtin_ia32_cvtneps2bf16_128_mask( - (__v4sf)__V, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); - return (__bfloat16)__R[0]; + __v8bf __R = __builtin_ia32_cvtneps2bf16_128_mask( + (__v4sf)__V, (__v8bf)_mm_undefined_si128(), (__mmask8)-1); + return (__bf16)__R[0]; } /// Convert Packed BF16 Data to Packed float Data. @@ -520,3 +512,4 @@ _mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) { #undef __DEFAULT_FN_ATTRS256 #endif +#endif diff --git a/lib/include/avx512vlbwintrin.h b/lib/include/avx512vlbwintrin.h index 521ccab27e..148af5ab9a 100644 --- a/lib/include/avx512vlbwintrin.h +++ b/lib/include/avx512vlbwintrin.h @@ -2803,6 +2803,358 @@ _mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A, (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \ (__v16hi)_mm256_setzero_si256())) +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_add_epi16(__m128i __W) { + return __builtin_reduce_add((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_mul_epi16(__m128i __W) { + return __builtin_reduce_mul((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_and_epi16(__m128i __W) { + return __builtin_reduce_and((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_or_epi16(__m128i __W) { + return __builtin_reduce_or((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_add_epi16( __mmask8 __M, __m128i __W) { + __W = _mm_maskz_mov_epi16(__M, __W); + return __builtin_reduce_add((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_mul_epi16( __mmask8 __M, __m128i __W) { + __W = _mm_mask_mov_epi16(_mm_set1_epi16(1), __M, __W); + return __builtin_reduce_mul((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_and_epi16( __mmask8 __M, __m128i __W) { + __W = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __W); + return __builtin_reduce_and((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W) { + __W = _mm_maskz_mov_epi16(__M, __W); + return __builtin_reduce_or((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_max_epi16(__m128i __V) { + return __builtin_reduce_max((__v8hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS128 +_mm_reduce_max_epu16(__m128i __V) { + return __builtin_reduce_max((__v8hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_min_epi16(__m128i __V) { + return __builtin_reduce_min((__v8hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS128 +_mm_reduce_min_epu16(__m128i __V) { + return __builtin_reduce_min((__v8hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_max_epi16(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi16(_mm_set1_epi16(-32767-1), __M, __V); + return __builtin_reduce_max((__v8hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_max_epu16(__mmask16 __M, __m128i __V) { + __V = _mm_maskz_mov_epi16(__M, __V); + return __builtin_reduce_max((__v8hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_min_epi16(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi16(_mm_set1_epi16(32767), __M, __V); + return __builtin_reduce_min((__v8hi)__V); +} + +static __inline__ 
unsigned short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_min_epu16(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __V); + return __builtin_reduce_min((__v8hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_add_epi16(__m256i __W) { + return __builtin_reduce_add((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_mul_epi16(__m256i __W) { + return __builtin_reduce_mul((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_and_epi16(__m256i __W) { + return __builtin_reduce_and((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_or_epi16(__m256i __W) { + return __builtin_reduce_or((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_add_epi16( __mmask16 __M, __m256i __W) { + __W = _mm256_maskz_mov_epi16(__M, __W); + return __builtin_reduce_add((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_mul_epi16( __mmask16 __M, __m256i __W) { + __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(1), __M, __W); + return __builtin_reduce_mul((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_and_epi16( __mmask16 __M, __m256i __W) { + __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __W); + return __builtin_reduce_and((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W) { + __W = _mm256_maskz_mov_epi16(__M, __W); + return __builtin_reduce_or((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_max_epi16(__m256i __V) { + return __builtin_reduce_max((__v16hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS256 +_mm256_reduce_max_epu16(__m256i __V) { + return __builtin_reduce_max((__v16hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_min_epi16(__m256i __V) { + return __builtin_reduce_min((__v16hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS256 +_mm256_reduce_min_epu16(__m256i __V) { + return __builtin_reduce_min((__v16hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __V) { + __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-32767-1), __M, __V); + return __builtin_reduce_max((__v16hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __V) { + __V = _mm256_maskz_mov_epi16(__M, __V); + return __builtin_reduce_max((__v16hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __V) { + __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(32767), __M, __V); + return __builtin_reduce_min((__v16hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __V) { + __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __V); + return __builtin_reduce_min((__v16hu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_add_epi8(__m128i __W) { + return __builtin_reduce_add((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_mul_epi8(__m128i __W) { + return __builtin_reduce_mul((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_and_epi8(__m128i __W) { + return __builtin_reduce_and((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_or_epi8(__m128i __W) { + 
return __builtin_reduce_or((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_add_epi8(__mmask16 __M, __m128i __W) { + __W = _mm_maskz_mov_epi8(__M, __W); + return __builtin_reduce_add((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_mul_epi8(__mmask16 __M, __m128i __W) { + __W = _mm_mask_mov_epi8(_mm_set1_epi8(1), __M, __W); + return __builtin_reduce_mul((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_and_epi8(__mmask16 __M, __m128i __W) { + __W = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __W); + return __builtin_reduce_and((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W) { + __W = _mm_maskz_mov_epi8(__M, __W); + return __builtin_reduce_or((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_max_epi8(__m128i __V) { + return __builtin_reduce_max((__v16qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS128 +_mm_reduce_max_epu8(__m128i __V) { + return __builtin_reduce_max((__v16qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_min_epi8(__m128i __V) { + return __builtin_reduce_min((__v16qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS128 +_mm_reduce_min_epu8(__m128i __V) { + return __builtin_reduce_min((__v16qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi8(_mm_set1_epi8(-127-1), __M, __V); + return __builtin_reduce_max((__v16qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __V) { + __V = _mm_maskz_mov_epi8(__M, __V); + return __builtin_reduce_max((__v16qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi8(_mm_set1_epi8(127), __M, __V); + return __builtin_reduce_min((__v16qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __V); + return __builtin_reduce_min((__v16qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_add_epi8(__m256i __W) { + return __builtin_reduce_add((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_mul_epi8(__m256i __W) { + return __builtin_reduce_mul((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_and_epi8(__m256i __W) { + return __builtin_reduce_and((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_or_epi8(__m256i __W) { + return __builtin_reduce_or((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_add_epi8(__mmask32 __M, __m256i __W) { + __W = _mm256_maskz_mov_epi8(__M, __W); + return __builtin_reduce_add((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_mul_epi8(__mmask32 __M, __m256i __W) { + __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(1), __M, __W); + return __builtin_reduce_mul((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_and_epi8(__mmask32 __M, __m256i __W) { + __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __W); + return __builtin_reduce_and((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 
+_mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W) { + __W = _mm256_maskz_mov_epi8(__M, __W); + return __builtin_reduce_or((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_max_epi8(__m256i __V) { + return __builtin_reduce_max((__v32qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS256 +_mm256_reduce_max_epu8(__m256i __V) { + return __builtin_reduce_max((__v32qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_min_epi8(__m256i __V) { + return __builtin_reduce_min((__v32qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS256 +_mm256_reduce_min_epu8(__m256i __V) { + return __builtin_reduce_min((__v32qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __V) { + __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-127-1), __M, __V); + return __builtin_reduce_max((__v32qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __V) { + __V = _mm256_maskz_mov_epi8(__M, __V); + return __builtin_reduce_max((__v32qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __V) { + __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(127), __M, __V); + return __builtin_reduce_min((__v32qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __V) { + __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __V); + return __builtin_reduce_min((__v32qu)__V); +} + #undef __DEFAULT_FN_ATTRS128 #undef __DEFAULT_FN_ATTRS256 diff --git a/lib/include/avx512vlfp16intrin.h b/lib/include/avx512vlfp16intrin.h index 3d27853ad9..d4a7d1b1c5 100644 --- a/lib/include/avx512vlfp16intrin.h +++ b/lib/include/avx512vlfp16intrin.h @@ -11,6 +11,8 @@ "Never use directly; include instead." #endif +#ifdef __SSE2__ + #ifndef __AVX512VLFP16INTRIN_H #define __AVX512VLFP16INTRIN_H @@ -2066,3 +2068,4 @@ _mm_reduce_min_ph(__m128h __V) { #undef __DEFAULT_FN_ATTRS256 #endif +#endif diff --git a/lib/include/avxifmaintrin.h b/lib/include/avxifmaintrin.h new file mode 100644 index 0000000000..5c782d2a5b --- /dev/null +++ b/lib/include/avxifmaintrin.h @@ -0,0 +1,177 @@ +/*===----------------- avxifmaintrin.h - IFMA intrinsics -------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVXIFMAINTRIN_H +#define __AVXIFMAINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxifma"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxifma"), \ + __min_vector_width__(256))) + +// must vex-encoding + +/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y +/// and \a __Z to form a 104-bit intermediate result. Add the high 52-bit +/// unsigned integer from the intermediate result with the corresponding +/// unsigned 64-bit integer in \a __X, and store the results in \a dst. 
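// Usage sketch for the masked reduce intrinsics added above (AVX512BW+AVX512VL;
// variable names are illustrative). Lanes cleared in the mask are first replaced
// with the operation's identity (0 for add, 1 for mul, all-ones for and, the
// type's extreme value for min/max), so only the selected elements contribute
// to the result:
//
//   __mmask16 keep_low8 = 0x00FF;
//   short sum = _mm256_mask_reduce_add_epi16(keep_low8, v);  // v[0] + ... + v[7]
//   short top = _mm256_mask_reduce_max_epi16(keep_low8, v);  // max of v[0..7]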
+/// +/// \headerfile +/// +/// \code +/// __m128i +/// _mm_madd52hi_avx_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPMADD52HUQ instruction. +/// +/// \return +/// return __m128i dst. +/// \param __X +/// A 128-bit vector of [2 x i64] +/// \param __Y +/// A 128-bit vector of [2 x i64] +/// \param __Z +/// A 128-bit vector of [2 x i64] +/// +/// \code{.operation} +/// FOR j := 0 to 1 +/// i := j*64 +/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) +/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_madd52hi_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) { + return (__m128i)__builtin_ia32_vpmadd52huq128((__v2di)__X, (__v2di)__Y, + (__v2di)__Z); +} + +/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y +/// and \a __Z to form a 104-bit intermediate result. Add the high 52-bit +/// unsigned integer from the intermediate result with the corresponding +/// unsigned 64-bit integer in \a __X, and store the results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i +/// _mm256_madd52hi_avx_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPMADD52HUQ instruction. +/// +/// \return +/// return __m256i dst. +/// \param __X +/// A 256-bit vector of [4 x i64] +/// \param __Y +/// A 256-bit vector of [4 x i64] +/// \param __Z +/// A 256-bit vector of [4 x i64] +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// i := j*64 +/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) +/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd52hi_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) { + return (__m256i)__builtin_ia32_vpmadd52huq256((__v4di)__X, (__v4di)__Y, + (__v4di)__Z); +} + +/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y +/// and \a __Z to form a 104-bit intermediate result. Add the low 52-bit +/// unsigned integer from the intermediate result with the corresponding +/// unsigned 64-bit integer in \a __X, and store the results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i +/// _mm_madd52lo_avx_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPMADD52LUQ instruction. +/// +/// \return +/// return __m128i dst. +/// \param __X +/// A 128-bit vector of [2 x i64] +/// \param __Y +/// A 128-bit vector of [2 x i64] +/// \param __Z +/// A 128-bit vector of [2 x i64] +/// +/// \code{.operation} +/// FOR j := 0 to 1 +/// i := j*64 +/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) +/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_madd52lo_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) { + return (__m128i)__builtin_ia32_vpmadd52luq128((__v2di)__X, (__v2di)__Y, + (__v2di)__Z); +} + +/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y +/// and \a __Z to form a 104-bit intermediate result. Add the low 52-bit +/// unsigned integer from the intermediate result with the corresponding +/// unsigned 64-bit integer in \a __X, and store the results in \a dst. 
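// Usage sketch for the 52-bit multiply-add intrinsics above (requires AVX-IFMA;
// a, b and the accumulators are illustrative). Each 64-bit lane of a and b is
// assumed to hold a value below 2^52; the low and high halves of the 104-bit
// product are accumulated separately:
//
//   __m128i lo = _mm_setzero_si128(), hi = _mm_setzero_si128();
//   lo = _mm_madd52lo_avx_epu64(lo, a, b);  // lo += bits [51:0]  of a*b, per lane
//   hi = _mm_madd52hi_avx_epu64(hi, a, b);  // hi += bits [103:52] of a*b, per lane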
+/// +/// \headerfile +/// +/// \code +/// __m256i +/// _mm256_madd52lo_avx_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPMADD52LUQ instruction. +/// +/// \return +/// return __m256i dst. +/// \param __X +/// A 256-bit vector of [4 x i64] +/// \param __Y +/// A 256-bit vector of [4 x i64] +/// \param __Z +/// A 256-bit vector of [4 x i64] +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// i := j*64 +/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) +/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd52lo_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) { + return (__m256i)__builtin_ia32_vpmadd52luq256((__v4di)__X, (__v4di)__Y, + (__v4di)__Z); +} +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif // __AVXIFMAINTRIN_H diff --git a/lib/include/avxintrin.h b/lib/include/avxintrin.h index a8f953c260..ee31569c16 100644 --- a/lib/include/avxintrin.h +++ b/lib/include/avxintrin.h @@ -39,6 +39,16 @@ typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1))); typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1))); typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1))); +#ifdef __SSE2__ +/* Both _Float16 and __bf16 require SSE2 being enabled. */ +typedef _Float16 __v16hf __attribute__((__vector_size__(32), __aligned__(32))); +typedef _Float16 __m256h __attribute__((__vector_size__(32), __aligned__(32))); +typedef _Float16 __m256h_u __attribute__((__vector_size__(32), __aligned__(1))); + +typedef __bf16 __v16bf __attribute__((__vector_size__(32), __aligned__(32))); +typedef __bf16 __m256bh __attribute__((__vector_size__(32), __aligned__(32))); +#endif + /* Define the default attributes for the functions in this file. */ #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(256))) #define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(128))) @@ -4288,7 +4298,7 @@ _mm256_set1_epi64x(long long __q) static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setzero_pd(void) { - return __extension__ (__m256d){ 0, 0, 0, 0 }; + return __extension__ (__m256d){ 0.0, 0.0, 0.0, 0.0 }; } /// Constructs a 256-bit floating-point vector of [8 x float] with all @@ -4302,7 +4312,7 @@ _mm256_setzero_pd(void) static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setzero_ps(void) { - return __extension__ (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 }; + return __extension__ (__m256){ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }; } /// Constructs a 256-bit integer vector initialized to zero. diff --git a/lib/include/avxneconvertintrin.h b/lib/include/avxneconvertintrin.h new file mode 100644 index 0000000000..1bef1c8937 --- /dev/null +++ b/lib/include/avxneconvertintrin.h @@ -0,0 +1,484 @@ +/*===-------------- avxneconvertintrin.h - AVXNECONVERT --------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error \ + "Never use directly; include instead." 
+#endif // __IMMINTRIN_H + +#ifdef __SSE2__ + +#ifndef __AVXNECONVERTINTRIN_H +#define __AVXNECONVERTINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxneconvert"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxneconvert"), \ + __min_vector_width__(256))) + +/// Convert scalar BF16 (16-bit) floating-point element +/// stored at memory locations starting at location \a __A to a +/// single-precision (32-bit) floating-point, broadcast it to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_bcstnebf16_ps(const void *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VBCSTNEBF162PS instruction. +/// +/// \param __A +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// b := Convert_BF16_To_FP32(MEM[__A+15:__A]) +/// FOR j := 0 to 3 +/// m := j*32 +/// dst[m+31:m] := b +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_bcstnebf16_ps(const void *__A) { + return (__m128)__builtin_ia32_vbcstnebf162ps128((const __bf16 *)__A); +} + +/// Convert scalar BF16 (16-bit) floating-point element +/// stored at memory locations starting at location \a __A to a +/// single-precision (32-bit) floating-point, broadcast it to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_bcstnebf16_ps(const void *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VBCSTNEBF162PS instruction. +/// +/// \param __A +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// b := Convert_BF16_To_FP32(MEM[__A+15:__A]) +/// FOR j := 0 to 7 +/// m := j*32 +/// dst[m+31:m] := b +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_bcstnebf16_ps(const void *__A) { + return (__m256)__builtin_ia32_vbcstnebf162ps256((const __bf16 *)__A); +} + +/// Convert scalar half-precision (16-bit) floating-point element +/// stored at memory locations starting at location \a __A to a +/// single-precision (32-bit) floating-point, broadcast it to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_bcstnesh_ps(const void *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VBCSTNESH2PS instruction. +/// +/// \param __A +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns +/// A 128-bit vector of [4 x float]. 
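// Usage sketch for the broadcast loads above (requires AVX-NE-CONVERT; the
// pointer is illustrative). One bf16 scalar is read from memory, widened to
// fp32, and splatted across every lane, without needing AVX-512:
//
//   const __bf16 *scale;                        // points at a single bf16 value
//   __m128 s4 = _mm_bcstnebf16_ps(scale);       // { s, s, s, s }
//   __m256 s8 = _mm256_bcstnebf16_ps(scale);    // { s, s, s, s, s, s, s, s }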
+/// +/// \code{.operation} +/// b := Convert_FP16_To_FP32(MEM[__A+15:__A]) +/// FOR j := 0 to 3 +/// m := j*32 +/// dst[m+31:m] := b +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_bcstnesh_ps(const void *__A) { + return (__m128)__builtin_ia32_vbcstnesh2ps128((const _Float16 *)__A); +} + +/// Convert scalar half-precision (16-bit) floating-point element +/// stored at memory locations starting at location \a __A to a +/// single-precision (32-bit) floating-point, broadcast it to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_bcstnesh_ps(const void *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VBCSTNESH2PS instruction. +/// +/// \param __A +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// b := Convert_FP16_To_FP32(MEM[__A+15:__A]) +/// FOR j := 0 to 7 +/// m := j*32 +/// dst[m+31:m] := b +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_bcstnesh_ps(const void *__A) { + return (__m256)__builtin_ia32_vbcstnesh2ps256((const _Float16 *)__A); +} + +/// Convert packed BF16 (16-bit) floating-point even-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_cvtneebf16_ps(const __m128bh *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEEBF162PS instruction. +/// +/// \param __A +/// A pointer to a 128-bit memory location containing 8 consecutive +/// BF16 (16-bit) floating-point values. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// k := j*2 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtneebf16_ps(const __m128bh *__A) { + return (__m128)__builtin_ia32_vcvtneebf162ps128((const __v8bf *)__A); +} + +/// Convert packed BF16 (16-bit) floating-point even-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneebf16_ps(const __m256bh *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEEBF162PS instruction. +/// +/// \param __A +/// A pointer to a 256-bit memory location containing 16 consecutive +/// BF16 (16-bit) floating-point values. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// k := j*2 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtneebf16_ps(const __m256bh *__A) { + return (__m256)__builtin_ia32_vcvtneebf162ps256((const __v16bf *)__A); +} + +/// Convert packed half-precision (16-bit) floating-point even-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. 
+/// +/// \headerfile +/// +/// \code +/// _mm_cvtneeph_ps(const __m128h *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEEPH2PS instruction. +/// +/// \param __A +/// A pointer to a 128-bit memory location containing 8 consecutive +/// half-precision (16-bit) floating-point values. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// k := j*2 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtneeph_ps(const __m128h *__A) { + return (__m128)__builtin_ia32_vcvtneeph2ps128((const __v8hf *)__A); +} + +/// Convert packed half-precision (16-bit) floating-point even-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneeph_ps(const __m256h *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEEPH2PS instruction. +/// +/// \param __A +/// A pointer to a 256-bit memory location containing 16 consecutive +/// half-precision (16-bit) floating-point values. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// k := j*2 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtneeph_ps(const __m256h *__A) { + return (__m256)__builtin_ia32_vcvtneeph2ps256((const __v16hf *)__A); +} + +/// Convert packed BF16 (16-bit) floating-point odd-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_cvtneobf16_ps(const __m128bh *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEOBF162PS instruction. +/// +/// \param __A +/// A pointer to a 128-bit memory location containing 8 consecutive +/// BF16 (16-bit) floating-point values. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// k := j*2+1 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtneobf16_ps(const __m128bh *__A) { + return (__m128)__builtin_ia32_vcvtneobf162ps128((const __v8bf *)__A); +} + +/// Convert packed BF16 (16-bit) floating-point odd-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneobf16_ps(const __m256bh *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEOBF162PS instruction. +/// +/// \param __A +/// A pointer to a 256-bit memory location containing 16 consecutive +/// BF16 (16-bit) floating-point values. +/// \returns +/// A 256-bit vector of [8 x float]. 
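// Usage sketch for the even/odd-indexed conversions above (requires
// AVX-NE-CONVERT; src is illustrative). A memory block of packed bf16 values is
// de-interleaved into two fp32 vectors, one from the even-indexed and one from
// the odd-indexed elements:
//
//   const __m128bh *src;                    // 8 packed bf16 values in memory
//   __m128 even = _mm_cvtneebf16_ps(src);   // elements 0, 2, 4, 6 as fp32
//   __m128 odd  = _mm_cvtneobf16_ps(src);   // elements 1, 3, 5, 7 as fp32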
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// k := j*2+1 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtneobf16_ps(const __m256bh *__A) { + return (__m256)__builtin_ia32_vcvtneobf162ps256((const __v16bf *)__A); +} + +/// Convert packed half-precision (16-bit) floating-point odd-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_cvtneoph_ps(const __m128h *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEOPH2PS instruction. +/// +/// \param __A +/// A pointer to a 128-bit memory location containing 8 consecutive +/// half-precision (16-bit) floating-point values. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// k := j*2+1 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtneoph_ps(const __m128h *__A) { + return (__m128)__builtin_ia32_vcvtneoph2ps128((const __v8hf *)__A); +} + +/// Convert packed half-precision (16-bit) floating-point odd-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneoph_ps(const __m256h *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEOPH2PS instruction. +/// +/// \param __A +/// A pointer to a 256-bit memory location containing 16 consecutive +/// half-precision (16-bit) floating-point values. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// k := j*2+1 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtneoph_ps(const __m256h *__A) { + return (__m256)__builtin_ia32_vcvtneoph2ps256((const __v16hf *)__A); +} + +/// Convert packed single-precision (32-bit) floating-point elements in \a __A +/// to packed BF16 (16-bit) floating-point elements, and store the results in \a +/// dst. +/// +/// \headerfile +/// +/// \code +/// _mm_cvtneps_avx_pbh(__m128 __A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEPS2BF16 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \returns +/// A 128-bit vector of [8 x bfloat]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_cvtneps_avx_pbh(__m128 __A) { + return (__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)__A); +} + +/// Convert packed single-precision (32-bit) floating-point elements in \a __A +/// to packed BF16 (16-bit) floating-point elements, and store the results in \a +/// dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneps_avx_pbh(__m256 __A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEPS2BF16 instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \returns +/// A 128-bit vector of [8 x bfloat]. 
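// Usage sketch for _mm_cvtneps_avx_pbh above (requires AVX-NE-CONVERT). Four
// fp32 values are rounded to bf16; they land in the low 64 bits of the result
// and the remaining bf16 lanes are zeroed:
//
//   __m128 f = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
//   __m128bh b = _mm_cvtneps_avx_pbh(f);    // { 1, 2, 3, 4, 0, 0, 0, 0 } as bf16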
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128bh __DEFAULT_FN_ATTRS256 +_mm256_cvtneps_avx_pbh(__m256 __A) { + return (__m128bh)__builtin_ia32_vcvtneps2bf16256((__v8sf)__A); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif // __AVXNECONVERTINTRIN_H +#endif // __SSE2__ diff --git a/lib/include/avxvnniint8intrin.h b/lib/include/avxvnniint8intrin.h new file mode 100644 index 0000000000..b0b6cb853f --- /dev/null +++ b/lib/include/avxvnniint8intrin.h @@ -0,0 +1,471 @@ +/*===-------- avxvnniint8intrin.h - AVXVNNIINT8 intrinsics -----------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error \ + "Never use directly; include instead." +#endif + +#ifndef __AVXVNNIINT8INTRIN_H +#define __AVXVNNIINT8INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint8"), \ + __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint8"), \ + __min_vector_width__(128))) + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbssd_epi32(__m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x char]. +/// \param __B +/// A 128-bit vector of [16 x char]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) +/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) +/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) +/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbssd_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbssd128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x char]. +/// \param __B +/// A 256-bit vector of [32 x char]. +/// \returns +/// A 256-bit vector of [8 x int]. 
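// Usage sketch for the 8-bit dot-product intrinsics above (requires
// AVX-VNNI-INT8; a, b and acc are illustrative). Each 32-bit accumulator lane
// gains the dot product of the corresponding group of four signed bytes; the
// "s"-suffixed variants saturate instead of wrapping on overflow:
//
//   __m128i acc = _mm_setzero_si128();
//   acc = _mm_dpbssd_epi32(acc, a, b);  // acc[j] += a[4j]*b[4j] + ... + a[4j+3]*b[4j+3]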
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) +/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) +/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) +/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbssd256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbssds_epi32( __m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x char]. +/// \param __B +/// A 128-bit vector of [16 x char]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) +/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) +/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) +/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbssds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbssds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x char]. +/// \param __B +/// A 256-bit vector of [32 x char]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) +/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) +/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) +/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbssds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbsud_epi32(__m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x char]. +/// \param __B +/// A 128-bit vector of [16 x unsigned char]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbsud_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbsud128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x char]. +/// \param __B +/// A 256-bit vector of [32 x unsigned char]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbsud256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbsuds_epi32( __m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x char]. +/// \param __B +/// A 128-bit vector of [16 x unsigned char]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbsuds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbsuds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x char]. +/// \param __B +/// A 256-bit vector of [32 x unsigned char]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbsuds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbuud_epi32(__m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x unsigned char]. +/// \param __B +/// A 128-bit vector of [16 x unsigned char]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) +/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) +/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) +/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbuud_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbuud128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x unsigned char]. +/// \param __B +/// A 256-bit vector of [32 x unsigned char]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) +/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) +/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) +/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbuud256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbuuds_epi32( __m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBUUDS instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x unsigned char]. +/// \param __B +/// A 128-bit vector of [16 x unsigned char]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) +/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) +/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) +/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbuuds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbuuds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBUUDS instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x unsigned char]. +/// \param __B +/// A 256-bit vector of [32 x unsigned char]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) +/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) +/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) +/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbuuds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif // __AVXVNNIINT8INTRIN_H diff --git a/lib/include/cmpccxaddintrin.h b/lib/include/cmpccxaddintrin.h new file mode 100644 index 0000000000..6957498996 --- /dev/null +++ b/lib/include/cmpccxaddintrin.h @@ -0,0 +1,70 @@ +/*===--------------- cmpccxaddintrin.h - CMPCCXADD intrinsics--------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __X86GPRINTRIN_H +#error \ + "Never use directly; include instead." +#endif // __X86GPRINTRIN_H + +#ifndef __CMPCCXADDINTRIN_H +#define __CMPCCXADDINTRIN_H +#ifdef __x86_64__ + +typedef enum { + _CMPCCX_O, /* Overflow. */ + _CMPCCX_NO, /* No overflow. */ + _CMPCCX_B, /* Below. */ + _CMPCCX_NB, /* Not below. */ + _CMPCCX_Z, /* Zero. */ + _CMPCCX_NZ, /* Not zero. */ + _CMPCCX_BE, /* Below or equal. */ + _CMPCCX_NBE, /* Neither below nor equal. */ + _CMPCCX_S, /* Sign. */ + _CMPCCX_NS, /* No sign. */ + _CMPCCX_P, /* Parity. */ + _CMPCCX_NP, /* No parity. */ + _CMPCCX_L, /* Less. */ + _CMPCCX_NL, /* Not less. */ + _CMPCCX_LE, /* Less or equal. */ + _CMPCCX_NLE, /* Neither less nor equal. */ +} _CMPCCX_ENUM; + +/// Compares the value from the memory __A with the value of __B. If the +/// specified condition __D is met, then add the third operand __C to the +/// __A and write it into __A, else the value of __A is unchanged. The return +/// value is the original value of __A. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c CMPCCXADD instructions. +/// +/// \param __A +/// __A pointer specifying the memory address. +/// +/// \param __B +/// A integer operand. +/// +/// \param __C +/// A integer operand. +/// +/// \param __D +/// The specified condition. +/// +/// \returns a integer which is the original value of first operand. 
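// Usage sketch for the _cmpccxadd macros defined below (64-bit mode only;
// counter and limit are illustrative). The load, compare, and conditional add
// act on the memory operand as a single atomic operation, which makes a bounded
// counter straightforward:
//
//   int old = _cmpccxadd_epi32(&counter, limit, 1, _CMPCCX_L);
//   // counter was incremented only if its old value compared (signed) less
//   // than limit; old holds the value read from memory either way.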
+ +#define _cmpccxadd_epi32(__A, __B, __C, __D) \ + ((int)(__builtin_ia32_cmpccxadd32((void *)(__A), (int)(__B), (int)(__C), \ + (int)(__D)))) + +#define _cmpccxadd_epi64(__A, __B, __C, __D) \ + ((long long)(__builtin_ia32_cmpccxadd64((void *)(__A), (long long)(__B), \ + (long long)(__C), (int)(__D)))) + +#endif // __x86_64__ +#endif // __CMPCCXADDINTRIN_H diff --git a/lib/include/cpuid.h b/lib/include/cpuid.h index caa0069c2e..1ad6853a97 100644 --- a/lib/include/cpuid.h +++ b/lib/include/cpuid.h @@ -200,9 +200,18 @@ #define bit_AMXINT8 0x02000000 /* Features in %eax for leaf 7 sub-leaf 1 */ +#define bit_RAOINT 0x00000008 #define bit_AVXVNNI 0x00000010 #define bit_AVX512BF16 0x00000020 +#define bit_CMPCCXADD 0x00000080 +#define bit_AMXFP16 0x00200000 #define bit_HRESET 0x00400000 +#define bit_AVXIFMA 0x00800000 + +/* Features in %edx for leaf 7 sub-leaf 1 */ +#define bit_AVXVNNIINT8 0x00000010 +#define bit_AVXNECONVERT 0x00000020 +#define bit_PREFETCHI 0x00004000 /* Features in %eax for leaf 13 sub-leaf 1 */ #define bit_XSAVEOPT 0x00000001 @@ -261,7 +270,8 @@ : "0"(__leaf), "2"(__count)) #endif -static __inline int __get_cpuid_max (unsigned int __leaf, unsigned int *__sig) +static __inline unsigned int __get_cpuid_max (unsigned int __leaf, + unsigned int *__sig) { unsigned int __eax, __ebx, __ecx, __edx; #if __i386__ diff --git a/lib/include/cuda_wrappers/cmath b/lib/include/cuda_wrappers/cmath new file mode 100644 index 0000000000..45f89beec9 --- /dev/null +++ b/lib/include/cuda_wrappers/cmath @@ -0,0 +1,90 @@ +/*===---- cmath - CUDA wrapper for ---------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CLANG_CUDA_WRAPPERS_CMATH +#define __CLANG_CUDA_WRAPPERS_CMATH + +#include_next + +#if defined(_LIBCPP_STD_VER) + +// libc++ will need long double variants of these functions, but CUDA does not +// provide them. We'll provide their declarations, which should allow the +// headers to parse, but would not allow accidental use of them on a GPU. + +__attribute__((device)) long double logb(long double); +__attribute__((device)) long double scalbn(long double, int); + +namespace std { + +// For __constexpr_fmin/fmax we only need device-side overloads before c++14 +// where they are not constexpr. 
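// Detection sketch for the leaf 7 sub-leaf 1 feature bits added to cpuid.h
// above (the variables are illustrative; __get_cpuid_count comes from
// <cpuid.h>):
//
//   unsigned int eax, ebx, ecx, edx;
//   if (__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx)) {
//     int has_avxifma      = (eax & bit_AVXIFMA) != 0;
//     int has_avxneconvert = (edx & bit_AVXNECONVERT) != 0;
//     int has_cmpccxadd    = (eax & bit_CMPCCXADD) != 0;
//   }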
+#if _LIBCPP_STD_VER < 14 + +__attribute__((device)) +inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 float __constexpr_fmax(float __x, float __y) _NOEXCEPT { + return __builtin_fmaxf(__x, __y); +} + +__attribute__((device)) +inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 double __constexpr_fmax(double __x, double __y) _NOEXCEPT { + return __builtin_fmax(__x, __y); +} + +__attribute__((device)) +inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 long double +__constexpr_fmax(long double __x, long double __y) _NOEXCEPT { + return __builtin_fmaxl(__x, __y); +} + +template ::value && is_arithmetic<_Up>::value, int> = 0> +__attribute__((device)) +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename __promote<_Tp, _Up>::type +__constexpr_fmax(_Tp __x, _Up __y) _NOEXCEPT { + using __result_type = typename __promote<_Tp, _Up>::type; + return std::__constexpr_fmax(static_cast<__result_type>(__x), static_cast<__result_type>(__y)); +} +#endif // _LIBCPP_STD_VER < 14 + +// For logb/scalbn templates we must always provide device overloads because +// libc++ implementation uses __builtin_XXX which gets translated into a libcall +// which we can't handle on GPU. We need to forward those to CUDA-provided +// implementations. + +template +__attribute__((device)) +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __constexpr_logb(_Tp __x) { + return ::logb(__x); +} + +template +__attribute__((device)) +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Tp __constexpr_scalbn(_Tp __x, int __exp) { + return ::scalbn(__x, __exp); +} + +} // namespace std// + +#endif // _LIBCPP_STD_VER + +#endif // include guard diff --git a/lib/include/emmintrin.h b/lib/include/emmintrin.h index a3f56e832b..064d974936 100644 --- a/lib/include/emmintrin.h +++ b/lib/include/emmintrin.h @@ -38,6 +38,16 @@ typedef unsigned char __v16qu __attribute__((__vector_size__(16))); * appear in the interface though. */ typedef signed char __v16qs __attribute__((__vector_size__(16))); +#ifdef __SSE2__ +/* Both _Float16 and __bf16 require SSE2 being enabled. */ +typedef _Float16 __v8hf __attribute__((__vector_size__(16), __aligned__(16))); +typedef _Float16 __m128h __attribute__((__vector_size__(16), __aligned__(16))); +typedef _Float16 __m128h_u __attribute__((__vector_size__(16), __aligned__(1))); + +typedef __bf16 __v8bf __attribute__((__vector_size__(16), __aligned__(16))); +typedef __bf16 __m128bh __attribute__((__vector_size__(16), __aligned__(16))); +#endif + /* Define the default attributes for the functions in this file. */ #define __DEFAULT_FN_ATTRS \ __attribute__((__always_inline__, __nodebug__, __target__("sse2"), \ @@ -1809,7 +1819,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w, /// \returns An initialized 128-bit floating-point vector of [2 x double] with /// all elements set to zero. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void) { - return __extension__(__m128d){0, 0}; + return __extension__(__m128d){0.0, 0.0}; } /// Constructs a 128-bit floating-point vector of [2 x double]. 
The lower diff --git a/lib/include/float.h b/lib/include/float.h index c6a6cc0846..0e73bca0a2 100644 --- a/lib/include/float.h +++ b/lib/include/float.h @@ -38,9 +38,10 @@ # undef FLT_MANT_DIG # undef DBL_MANT_DIG # undef LDBL_MANT_DIG -# if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || \ - __cplusplus >= 201103L || \ - (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE)) +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ + !defined(__STRICT_ANSI__) || \ + (defined(__cplusplus) && __cplusplus >= 201103L) || \ + (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE)) # undef DECIMAL_DIG # endif # undef FLT_DIG @@ -67,9 +68,10 @@ # undef FLT_MIN # undef DBL_MIN # undef LDBL_MIN -# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || \ - __cplusplus >= 201703L || \ - (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE)) +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \ + !defined(__STRICT_ANSI__) || \ + (defined(__cplusplus) && __cplusplus >= 201703L) || \ + (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE)) # undef FLT_TRUE_MIN # undef DBL_TRUE_MIN # undef LDBL_TRUE_MIN @@ -84,7 +86,10 @@ /* Characteristics of floating point types, C99 5.2.4.2.2 */ +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ + (defined(__cplusplus) && __cplusplus >= 201103L) #define FLT_EVAL_METHOD __FLT_EVAL_METHOD__ +#endif #define FLT_ROUNDS (__builtin_flt_rounds()) #define FLT_RADIX __FLT_RADIX__ @@ -92,8 +97,9 @@ #define DBL_MANT_DIG __DBL_MANT_DIG__ #define LDBL_MANT_DIG __LDBL_MANT_DIG__ -#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || \ - __cplusplus >= 201103L || \ +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ + !defined(__STRICT_ANSI__) || \ + (defined(__cplusplus) && __cplusplus >= 201103L) || \ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE)) # define DECIMAL_DIG __DECIMAL_DIG__ #endif @@ -130,8 +136,9 @@ #define DBL_MIN __DBL_MIN__ #define LDBL_MIN __LDBL_MIN__ -#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || \ - __cplusplus >= 201703L || \ +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \ + !defined(__STRICT_ANSI__) || \ + (defined(__cplusplus) && __cplusplus >= 201703L) || \ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE)) # define FLT_TRUE_MIN __FLT_DENORM_MIN__ # define DBL_TRUE_MIN __DBL_DENORM_MIN__ diff --git a/lib/include/gfniintrin.h b/lib/include/gfniintrin.h index a59238b0b1..5ec53c54fc 100644 --- a/lib/include/gfniintrin.h +++ b/lib/include/gfniintrin.h @@ -20,10 +20,12 @@ /* Default attributes for YMM unmasked form. */ #define __DEFAULT_FN_ATTRS_Y __attribute__((__always_inline__, __nodebug__, __target__("avx,gfni"), __min_vector_width__(256))) -/* Default attributes for ZMM forms. */ -#define __DEFAULT_FN_ATTRS_Z __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,gfni"), __min_vector_width__(512))) +/* Default attributes for ZMM unmasked forms. */ +#define __DEFAULT_FN_ATTRS_Z __attribute__((__always_inline__, __nodebug__, __target__("avx512f,gfni"), __min_vector_width__(512))) +/* Default attributes for ZMM masked forms. */ +#define __DEFAULT_FN_ATTRS_Z_MASK __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,gfni"), __min_vector_width__(512))) -/* Default attributes for VLX forms. */ +/* Default attributes for VLX masked forms. 
*/ #define __DEFAULT_FN_ATTRS_VL128 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(128))) #define __DEFAULT_FN_ATTRS_VL256 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(256))) @@ -99,7 +101,7 @@ _mm512_gf2p8mul_epi8(__m512i __A, __m512i __B) (__v64qi) __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS_Z +static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK _mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_selectb_512(__U, @@ -107,7 +109,7 @@ _mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B) (__v64qi) __S); } -static __inline__ __m512i __DEFAULT_FN_ATTRS_Z +static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK _mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B) { return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(), diff --git a/lib/include/hlsl.h b/lib/include/hlsl.h deleted file mode 100644 index a9dce4503d..0000000000 --- a/lib/include/hlsl.h +++ /dev/null @@ -1,15 +0,0 @@ -//===----- hlsl.h - HLSL definitions --------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _HLSL_H_ -#define _HLSL_H_ - -#include "hlsl/hlsl_basic_types.h" -#include "hlsl/hlsl_intrinsics.h" - -#endif //_HLSL_H_ diff --git a/lib/include/hlsl_basic_types.h b/lib/include/hlsl_basic_types.h deleted file mode 100644 index e68715f1a6..0000000000 --- a/lib/include/hlsl_basic_types.h +++ /dev/null @@ -1,64 +0,0 @@ -//===----- hlsl_basic_types.h - HLSL definitions for basic types ----------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _HLSL_HLSL_BASIC_TYPES_H_ -#define _HLSL_HLSL_BASIC_TYPES_H_ - -// built-in scalar data types: - -#ifdef __HLSL_ENABLE_16_BIT -// 16-bit integer. -typedef unsigned short uint16_t; -typedef short int16_t; -#endif - -// unsigned 32-bit integer. -typedef unsigned int uint; - -// 64-bit integer. 
-typedef unsigned long uint64_t; -typedef long int64_t; - -// built-in vector data types: - -#ifdef __HLSL_ENABLE_16_BIT -typedef vector int16_t2; -typedef vector int16_t3; -typedef vector int16_t4; -typedef vector uint16_t2; -typedef vector uint16_t3; -typedef vector uint16_t4; -#endif - -typedef vector int2; -typedef vector int3; -typedef vector int4; -typedef vector uint2; -typedef vector uint3; -typedef vector uint4; -typedef vector int64_t2; -typedef vector int64_t3; -typedef vector int64_t4; -typedef vector uint64_t2; -typedef vector uint64_t3; -typedef vector uint64_t4; - -#ifdef __HLSL_ENABLE_16_BIT -typedef vector half2; -typedef vector half3; -typedef vector half4; -#endif - -typedef vector float2; -typedef vector float3; -typedef vector float4; -typedef vector double2; -typedef vector double3; -typedef vector double4; - -#endif //_HLSL_HLSL_BASIC_TYPES_H_ diff --git a/lib/include/hlsl_intrinsics.h b/lib/include/hlsl_intrinsics.h deleted file mode 100644 index b5cdb8b449..0000000000 --- a/lib/include/hlsl_intrinsics.h +++ /dev/null @@ -1,15 +0,0 @@ -//===----- hlsl_intrinsics.h - HLSL definitions for intrinsics ----------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _HLSL_HLSL_INTRINSICS_H_ -#define _HLSL_HLSL_INTRINSICS_H_ - -__attribute__((clang_builtin_alias(__builtin_hlsl_wave_active_count_bits))) uint -WaveActiveCountBits(bool bBit); - -#endif //_HLSL_HLSL_INTRINSICS_H_ diff --git a/lib/include/immintrin.h b/lib/include/immintrin.h index e4d7a799b1..6967b46fdb 100644 --- a/lib/include/immintrin.h +++ b/lib/include/immintrin.h @@ -189,6 +189,11 @@ #include #endif +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__AVXIFMA__) +#include +#endif + #if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ defined(__AVX512VBMI__) #include @@ -214,17 +219,13 @@ #include #endif -/* - * FIXME: _Float16 type is legal only when HW support float16 operation. - * We use __AVX512FP16__ to identify if float16 is supported or not, so - * when float16 is not supported, the related header is not included. - * - */ -#if defined(__AVX512FP16__) +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__AVX512FP16__) #include #endif -#if defined(__AVX512FP16__) && defined(__AVX512VL__) +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512FP16__)) #include #endif @@ -258,6 +259,16 @@ #include #endif +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__AVXVNNIINT8__) +#include +#endif + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__AVXNECONVERT__) +#include +#endif + #if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ defined(__RDPID__) /// Returns the value of the IA32_TSC_AUX MSR (0xc0000103). @@ -291,6 +302,23 @@ _rdrand64_step(unsigned long long *__p) { return (int)__builtin_ia32_rdrand64_step(__p); } +#else +// We need to emulate the functionality of 64-bit rdrand with 2 32-bit +// rdrand instructions. 
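// Either 32-bit draw below can transiently fail; in that case the emulation
// stores 0 and reports failure, so callers retry a bounded number of times just
// as they would with native RDRAND (sketch; r is illustrative):
//
//   unsigned long long r;
//   int ok = 0;
//   for (int i = 0; i < 10 && !ok; ++i)
//     ok = _rdrand64_step(&r);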
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) +_rdrand64_step(unsigned long long *__p) +{ + unsigned int __lo, __hi; + unsigned int __res_lo = __builtin_ia32_rdrand32_step(&__lo); + unsigned int __res_hi = __builtin_ia32_rdrand32_step(&__hi); + if (__res_lo && __res_hi) { + *__p = ((unsigned long long)__hi << 32) | (unsigned long long)__lo; + return 1; + } else { + *__p = 0; + return 0; + } +} #endif #endif /* __RDRND__ */ @@ -495,6 +523,10 @@ _storebe_i64(void * __P, long long __D) { defined(__INVPCID__) #include #endif +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__AMXFP16__) +#include +#endif #if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ defined(__KL__) || defined(__WIDEKL__) diff --git a/lib/include/larchintrin.h b/lib/include/larchintrin.h new file mode 100644 index 0000000000..c5c533ee0b --- /dev/null +++ b/lib/include/larchintrin.h @@ -0,0 +1,234 @@ +/*===------------ larchintrin.h - LoongArch intrinsics ---------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef _LOONGARCH_BASE_INTRIN_H +#define _LOONGARCH_BASE_INTRIN_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct rdtime { + unsigned int value; + unsigned int timeid; +} __rdtime_t; + +#if __loongarch_grlen == 64 +typedef struct drdtime { + unsigned long dvalue; + unsigned long dtimeid; +} __drdtime_t; + +extern __inline __drdtime_t + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __rdtime_d(void) { + __drdtime_t __drdtime; + __asm__ volatile( + "rdtime.d %[val], %[tid]\n\t" + : [val] "=&r"(__drdtime.dvalue), [tid] "=&r"(__drdtime.dtimeid)); + return __drdtime; +} +#endif + +extern __inline __rdtime_t + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __rdtimeh_w(void) { + __rdtime_t __rdtime; + __asm__ volatile("rdtimeh.w %[val], %[tid]\n\t" + : [val] "=&r"(__rdtime.value), [tid] "=&r"(__rdtime.timeid)); + return __rdtime; +} + +extern __inline __rdtime_t + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __rdtimel_w(void) { + __rdtime_t __rdtime; + __asm__ volatile("rdtimel.w %[val], %[tid]\n\t" + : [val] "=&r"(__rdtime.value), [tid] "=&r"(__rdtime.timeid)); + return __rdtime; +} + +#if __loongarch_grlen == 64 +extern __inline int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __crc_w_b_w(char _1, int _2) { + return (int)__builtin_loongarch_crc_w_b_w((char)_1, (int)_2); +} + +extern __inline int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __crc_w_h_w(short _1, int _2) { + return (int)__builtin_loongarch_crc_w_h_w((short)_1, (int)_2); +} + +extern __inline int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __crc_w_w_w(int _1, int _2) { + return (int)__builtin_loongarch_crc_w_w_w((int)_1, (int)_2); +} + +extern __inline int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __crc_w_d_w(long int _1, int _2) { + return (int)__builtin_loongarch_crc_w_d_w((long int)_1, (int)_2); +} + +extern __inline int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __crcc_w_b_w(char _1, int _2) { + return (int)__builtin_loongarch_crcc_w_b_w((char)_1, (int)_2); +} + +extern 
__inline int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __crcc_w_h_w(short _1, int _2) { + return (int)__builtin_loongarch_crcc_w_h_w((short)_1, (int)_2); +} + +extern __inline int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __crcc_w_w_w(int _1, int _2) { + return (int)__builtin_loongarch_crcc_w_w_w((int)_1, (int)_2); +} + +extern __inline int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __crcc_w_d_w(long int _1, int _2) { + return (int)__builtin_loongarch_crcc_w_d_w((long int)_1, (int)_2); +} +#endif + +#define __break(/*ui15*/ _1) __builtin_loongarch_break((_1)) + +#if __loongarch_grlen == 32 +#define __cacop_w(/*uimm5*/ _1, /*unsigned int*/ _2, /*simm12*/ _3) \ + ((void)__builtin_loongarch_cacop_w((_1), (unsigned int)(_2), (_3))) +#endif + +#if __loongarch_grlen == 64 +#define __cacop_d(/*uimm5*/ _1, /*unsigned long int*/ _2, /*simm12*/ _3) \ + ((void)__builtin_loongarch_cacop_d((_1), (unsigned long int)(_2), (_3))) +#endif + +#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar((_1)) + +#define __ibar(/*ui15*/ _1) __builtin_loongarch_ibar((_1)) + +#define __movfcsr2gr(/*ui5*/ _1) __builtin_loongarch_movfcsr2gr((_1)); + +#define __movgr2fcsr(/*ui5*/ _1, _2) \ + __builtin_loongarch_movgr2fcsr((_1), (unsigned int)_2); + +#define __syscall(/*ui15*/ _1) __builtin_loongarch_syscall((_1)) + +#define __csrrd_w(/*ui14*/ _1) ((unsigned int)__builtin_loongarch_csrrd_w((_1))) + +#define __csrwr_w(/*unsigned int*/ _1, /*ui14*/ _2) \ + ((unsigned int)__builtin_loongarch_csrwr_w((unsigned int)(_1), (_2))) + +#define __csrxchg_w(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) \ + ((unsigned int)__builtin_loongarch_csrxchg_w((unsigned int)(_1), \ + (unsigned int)(_2), (_3))) + +#if __loongarch_grlen == 64 +#define __csrrd_d(/*ui14*/ _1) \ + ((unsigned long int)__builtin_loongarch_csrrd_d((_1))) + +#define __csrwr_d(/*unsigned long int*/ _1, /*ui14*/ _2) \ + ((unsigned long int)__builtin_loongarch_csrwr_d((unsigned long int)(_1), \ + (_2))) + +#define __csrxchg_d(/*unsigned long int*/ _1, /*unsigned long int*/ _2, \ + /*ui14*/ _3) \ + ((unsigned long int)__builtin_loongarch_csrxchg_d( \ + (unsigned long int)(_1), (unsigned long int)(_2), (_3))) +#endif + +extern __inline unsigned char + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __iocsrrd_b(unsigned int _1) { + return (unsigned char)__builtin_loongarch_iocsrrd_b((unsigned int)_1); +} + +extern __inline unsigned char + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __iocsrrd_h(unsigned int _1) { + return (unsigned short)__builtin_loongarch_iocsrrd_h((unsigned int)_1); +} + +extern __inline unsigned int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __iocsrrd_w(unsigned int _1) { + return (unsigned int)__builtin_loongarch_iocsrrd_w((unsigned int)_1); +} + +#if __loongarch_grlen == 64 +extern __inline unsigned long int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __iocsrrd_d(unsigned int _1) { + return (unsigned long int)__builtin_loongarch_iocsrrd_d((unsigned int)_1); +} +#endif + +extern __inline void + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __iocsrwr_b(unsigned char _1, unsigned int _2) { + __builtin_loongarch_iocsrwr_b((unsigned char)_1, (unsigned int)_2); +} + +extern __inline void + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __iocsrwr_h(unsigned short _1, unsigned int _2) { + 
__builtin_loongarch_iocsrwr_h((unsigned short)_1, (unsigned int)_2); +} + +extern __inline void + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __iocsrwr_w(unsigned int _1, unsigned int _2) { + __builtin_loongarch_iocsrwr_w((unsigned int)_1, (unsigned int)_2); +} + +extern __inline unsigned int + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __cpucfg(unsigned int _1) { + return (unsigned int)__builtin_loongarch_cpucfg((unsigned int)_1); +} + +#if __loongarch_grlen == 64 +extern __inline void + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __iocsrwr_d(unsigned long int _1, unsigned int _2) { + __builtin_loongarch_iocsrwr_d((unsigned long int)_1, (unsigned int)_2); +} + +extern __inline void + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __asrtgt_d(long int _1, long int _2) { + __builtin_loongarch_asrtgt_d((long int)_1, (long int)_2); +} + +extern __inline void + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __asrtle_d(long int _1, long int _2) { + __builtin_loongarch_asrtle_d((long int)_1, (long int)_2); +} +#endif + +#if __loongarch_grlen == 64 +#define __lddir_d(/*long int*/ _1, /*ui5*/ _2) \ + ((long int)__builtin_loongarch_lddir_d((long int)(_1), (_2))) + +#define __ldpte_d(/*long int*/ _1, /*ui5*/ _2) \ + ((void)__builtin_loongarch_ldpte_d((long int)(_1), (_2))) +#endif + +#ifdef __cplusplus +} +#endif +#endif /* _LOONGARCH_BASE_INTRIN_H */ diff --git a/lib/include/limits.h b/lib/include/limits.h index cfd23a219e..32cc901b26 100644 --- a/lib/include/limits.h +++ b/lib/include/limits.h @@ -65,7 +65,7 @@ /* C2x 5.2.4.2.1 */ /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L #define BOOL_WIDTH __BOOL_WIDTH__ #define CHAR_WIDTH CHAR_BIT #define SCHAR_WIDTH CHAR_BIT @@ -93,7 +93,8 @@ /* C99 5.2.4.2.1: Added long long. C++11 18.3.3.2: same contents as the Standard C Library header . */ -#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ + (defined(__cplusplus) && __cplusplus >= 201103L) #undef LLONG_MIN #undef LLONG_MAX diff --git a/lib/include/opencl-c-base.h b/lib/include/opencl-c-base.h index c433b4f7eb..fad2f9c027 100644 --- a/lib/include/opencl-c-base.h +++ b/lib/include/opencl-c-base.h @@ -74,6 +74,25 @@ #define __opencl_c_atomic_scope_all_devices 1 #define __opencl_c_read_write_images 1 #endif // defined(__SPIR__) + +// Undefine any feature macros that have been explicitly disabled using +// an __undef_ macro. 
+#ifdef __undef___opencl_c_work_group_collective_functions +#undef __opencl_c_work_group_collective_functions +#endif +#ifdef __undef___opencl_c_atomic_order_seq_cst +#undef __opencl_c_atomic_order_seq_cst +#endif +#ifdef __undef___opencl_c_atomic_scope_device +#undef __opencl_c_atomic_scope_device +#endif +#ifdef __undef___opencl_c_atomic_scope_all_devices +#undef __opencl_c_atomic_scope_all_devices +#endif +#ifdef __undef___opencl_c_read_write_images +#undef __opencl_c_read_write_images +#endif + #endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300) #if !defined(__opencl_c_generic_address_space) diff --git a/lib/include/opencl-c.h b/lib/include/opencl-c.h index 72a6bfeafd..288bb18bc6 100644 --- a/lib/include/opencl-c.h +++ b/lib/include/opencl-c.h @@ -12396,11 +12396,11 @@ void __ovld vstorea_half16_rtn(double16, size_t, __private half *); * image objects and then want to read the updated data. */ -void __ovld __conv barrier(cl_mem_fence_flags flags); +void __ovld __conv barrier(cl_mem_fence_flags); #if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) -void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope); -void __ovld __conv work_group_barrier(cl_mem_fence_flags flags); +void __ovld __conv work_group_barrier(cl_mem_fence_flags, memory_scope); +void __ovld __conv work_group_barrier(cl_mem_fence_flags); #endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) // OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions @@ -12418,7 +12418,7 @@ void __ovld __conv work_group_barrier(cl_mem_fence_flags flags); * CLK_LOCAL_MEM_FENCE * CLK_GLOBAL_MEM_FENCE. */ -void __ovld mem_fence(cl_mem_fence_flags flags); +void __ovld mem_fence(cl_mem_fence_flags); /** * Read memory barrier that orders only @@ -12430,7 +12430,7 @@ void __ovld mem_fence(cl_mem_fence_flags flags); * CLK_LOCAL_MEM_FENCE * CLK_GLOBAL_MEM_FENCE. */ -void __ovld read_mem_fence(cl_mem_fence_flags flags); +void __ovld read_mem_fence(cl_mem_fence_flags); /** * Write memory barrier that orders only @@ -12442,7 +12442,7 @@ void __ovld read_mem_fence(cl_mem_fence_flags flags); * CLK_LOCAL_MEM_FENCE * CLK_GLOBAL_MEM_FENCE. */ -void __ovld write_mem_fence(cl_mem_fence_flags flags); +void __ovld write_mem_fence(cl_mem_fence_flags); // OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions @@ -12891,29 +12891,29 @@ void __ovld prefetch(const __global half16 *, size_t); * (old + val) and store result at location * pointed by p. The function returns old. 
*/ -int __ovld atomic_add(volatile __global int *p, int val); -uint __ovld atomic_add(volatile __global uint *p, uint val); -int __ovld atomic_add(volatile __local int *p, int val); -uint __ovld atomic_add(volatile __local uint *p, uint val); +int __ovld atomic_add(volatile __global int *, int); +uint __ovld atomic_add(volatile __global uint *, uint); +int __ovld atomic_add(volatile __local int *, int); +uint __ovld atomic_add(volatile __local uint *, uint); #ifdef __OPENCL_CPP_VERSION__ -int __ovld atomic_add(volatile int *p, int val); -uint __ovld atomic_add(volatile uint *p, uint val); +int __ovld atomic_add(volatile int *, int); +uint __ovld atomic_add(volatile uint *, uint); #endif #if defined(cl_khr_global_int32_base_atomics) -int __ovld atom_add(volatile __global int *p, int val); -uint __ovld atom_add(volatile __global uint *p, uint val); +int __ovld atom_add(volatile __global int *, int); +uint __ovld atom_add(volatile __global uint *, uint); #endif #if defined(cl_khr_local_int32_base_atomics) -int __ovld atom_add(volatile __local int *p, int val); -uint __ovld atom_add(volatile __local uint *p, uint val); +int __ovld atom_add(volatile __local int *, int); +uint __ovld atom_add(volatile __local uint *, uint); #endif #if defined(cl_khr_int64_base_atomics) -long __ovld atom_add(volatile __global long *p, long val); -ulong __ovld atom_add(volatile __global ulong *p, ulong val); -long __ovld atom_add(volatile __local long *p, long val); -ulong __ovld atom_add(volatile __local ulong *p, ulong val); +long __ovld atom_add(volatile __global long *, long); +ulong __ovld atom_add(volatile __global ulong *, ulong); +long __ovld atom_add(volatile __local long *, long); +ulong __ovld atom_add(volatile __local ulong *, ulong); #endif /** @@ -12921,29 +12921,29 @@ ulong __ovld atom_add(volatile __local ulong *p, ulong val); * Compute (old - val) and store result at location pointed by p. The function * returns old. 
*/ -int __ovld atomic_sub(volatile __global int *p, int val); -uint __ovld atomic_sub(volatile __global uint *p, uint val); -int __ovld atomic_sub(volatile __local int *p, int val); -uint __ovld atomic_sub(volatile __local uint *p, uint val); +int __ovld atomic_sub(volatile __global int *, int); +uint __ovld atomic_sub(volatile __global uint *, uint); +int __ovld atomic_sub(volatile __local int *, int); +uint __ovld atomic_sub(volatile __local uint *, uint); #ifdef __OPENCL_CPP_VERSION__ -int __ovld atomic_sub(volatile int *p, int val); -uint __ovld atomic_sub(volatile uint *p, uint val); +int __ovld atomic_sub(volatile int *, int); +uint __ovld atomic_sub(volatile uint *, uint); #endif #if defined(cl_khr_global_int32_base_atomics) -int __ovld atom_sub(volatile __global int *p, int val); -uint __ovld atom_sub(volatile __global uint *p, uint val); +int __ovld atom_sub(volatile __global int *, int); +uint __ovld atom_sub(volatile __global uint *, uint); #endif #if defined(cl_khr_local_int32_base_atomics) -int __ovld atom_sub(volatile __local int *p, int val); -uint __ovld atom_sub(volatile __local uint *p, uint val); +int __ovld atom_sub(volatile __local int *, int); +uint __ovld atom_sub(volatile __local uint *, uint); #endif #if defined(cl_khr_int64_base_atomics) -long __ovld atom_sub(volatile __global long *p, long val); -ulong __ovld atom_sub(volatile __global ulong *p, ulong val); -long __ovld atom_sub(volatile __local long *p, long val); -ulong __ovld atom_sub(volatile __local ulong *p, ulong val); +long __ovld atom_sub(volatile __global long *, long); +ulong __ovld atom_sub(volatile __global ulong *, ulong); +long __ovld atom_sub(volatile __local long *, long); +ulong __ovld atom_sub(volatile __local ulong *, ulong); #endif /** @@ -12951,32 +12951,32 @@ ulong __ovld atom_sub(volatile __local ulong *p, ulong val); * with new value given by val. Returns old * value. 
*/ -int __ovld atomic_xchg(volatile __global int *p, int val); -uint __ovld atomic_xchg(volatile __global uint *p, uint val); -int __ovld atomic_xchg(volatile __local int *p, int val); -uint __ovld atomic_xchg(volatile __local uint *p, uint val); -float __ovld atomic_xchg(volatile __global float *p, float val); -float __ovld atomic_xchg(volatile __local float *p, float val); +int __ovld atomic_xchg(volatile __global int *, int); +uint __ovld atomic_xchg(volatile __global uint *, uint); +int __ovld atomic_xchg(volatile __local int *, int); +uint __ovld atomic_xchg(volatile __local uint *, uint); +float __ovld atomic_xchg(volatile __global float *, float); +float __ovld atomic_xchg(volatile __local float *, float); #ifdef __OPENCL_CPP_VERSION__ -int __ovld atomic_xchg(volatile int *p, int val); -uint __ovld atomic_xchg(volatile uint *p, uint val); -float __ovld atomic_xchg(volatile float *p, float val); +int __ovld atomic_xchg(volatile int *, int); +uint __ovld atomic_xchg(volatile uint *, uint); +float __ovld atomic_xchg(volatile float *, float); #endif #if defined(cl_khr_global_int32_base_atomics) -int __ovld atom_xchg(volatile __global int *p, int val); -uint __ovld atom_xchg(volatile __global uint *p, uint val); +int __ovld atom_xchg(volatile __global int *, int); +uint __ovld atom_xchg(volatile __global uint *, uint); #endif #if defined(cl_khr_local_int32_base_atomics) -int __ovld atom_xchg(volatile __local int *p, int val); -uint __ovld atom_xchg(volatile __local uint *p, uint val); +int __ovld atom_xchg(volatile __local int *, int); +uint __ovld atom_xchg(volatile __local uint *, uint); #endif #if defined(cl_khr_int64_base_atomics) -long __ovld atom_xchg(volatile __global long *p, long val); -long __ovld atom_xchg(volatile __local long *p, long val); -ulong __ovld atom_xchg(volatile __global ulong *p, ulong val); -ulong __ovld atom_xchg(volatile __local ulong *p, ulong val); +long __ovld atom_xchg(volatile __global long *, long); +long __ovld atom_xchg(volatile __local long *, long); +ulong __ovld atom_xchg(volatile __global ulong *, ulong); +ulong __ovld atom_xchg(volatile __local ulong *, ulong); #endif /** @@ -13048,29 +13048,29 @@ ulong __ovld atom_dec(volatile __local ulong *); * location pointed by p. The function * returns old. 
*/ -int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val); -uint __ovld atomic_cmpxchg(volatile __global uint *p, uint cmp, uint val); -int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val); -uint __ovld atomic_cmpxchg(volatile __local uint *p, uint cmp, uint val); +int __ovld atomic_cmpxchg(volatile __global int *, int, int); +uint __ovld atomic_cmpxchg(volatile __global uint *, uint, uint); +int __ovld atomic_cmpxchg(volatile __local int *, int, int); +uint __ovld atomic_cmpxchg(volatile __local uint *, uint, uint); #ifdef __OPENCL_CPP_VERSION__ -int __ovld atomic_cmpxchg(volatile int *p, int cmp, int val); -uint __ovld atomic_cmpxchg(volatile uint *p, uint cmp, uint val); +int __ovld atomic_cmpxchg(volatile int *, int, int); +uint __ovld atomic_cmpxchg(volatile uint *, uint, uint); #endif #if defined(cl_khr_global_int32_base_atomics) -int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val); -uint __ovld atom_cmpxchg(volatile __global uint *p, uint cmp, uint val); +int __ovld atom_cmpxchg(volatile __global int *, int, int); +uint __ovld atom_cmpxchg(volatile __global uint *, uint, uint); #endif #if defined(cl_khr_local_int32_base_atomics) -int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val); -uint __ovld atom_cmpxchg(volatile __local uint *p, uint cmp, uint val); +int __ovld atom_cmpxchg(volatile __local int *, int, int); +uint __ovld atom_cmpxchg(volatile __local uint *, uint, uint); #endif #if defined(cl_khr_int64_base_atomics) -long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val); -ulong __ovld atom_cmpxchg(volatile __global ulong *p, ulong cmp, ulong val); -long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val); -ulong __ovld atom_cmpxchg(volatile __local ulong *p, ulong cmp, ulong val); +long __ovld atom_cmpxchg(volatile __global long *, long, long); +ulong __ovld atom_cmpxchg(volatile __global ulong *, ulong, ulong); +long __ovld atom_cmpxchg(volatile __local long *, long, long); +ulong __ovld atom_cmpxchg(volatile __local ulong *, ulong, ulong); #endif /** @@ -13080,29 +13080,29 @@ ulong __ovld atom_cmpxchg(volatile __local ulong *p, ulong cmp, ulong val); * location pointed by p. The function * returns old. 
*/ -int __ovld atomic_min(volatile __global int *p, int val); -uint __ovld atomic_min(volatile __global uint *p, uint val); -int __ovld atomic_min(volatile __local int *p, int val); -uint __ovld atomic_min(volatile __local uint *p, uint val); +int __ovld atomic_min(volatile __global int *, int); +uint __ovld atomic_min(volatile __global uint *, uint); +int __ovld atomic_min(volatile __local int *, int); +uint __ovld atomic_min(volatile __local uint *, uint); #ifdef __OPENCL_CPP_VERSION__ -int __ovld atomic_min(volatile int *p, int val); -uint __ovld atomic_min(volatile uint *p, uint val); +int __ovld atomic_min(volatile int *, int); +uint __ovld atomic_min(volatile uint *, uint); #endif #if defined(cl_khr_global_int32_extended_atomics) -int __ovld atom_min(volatile __global int *p, int val); -uint __ovld atom_min(volatile __global uint *p, uint val); +int __ovld atom_min(volatile __global int *, int); +uint __ovld atom_min(volatile __global uint *, uint); #endif #if defined(cl_khr_local_int32_extended_atomics) -int __ovld atom_min(volatile __local int *p, int val); -uint __ovld atom_min(volatile __local uint *p, uint val); +int __ovld atom_min(volatile __local int *, int); +uint __ovld atom_min(volatile __local uint *, uint); #endif #if defined(cl_khr_int64_extended_atomics) -long __ovld atom_min(volatile __global long *p, long val); -ulong __ovld atom_min(volatile __global ulong *p, ulong val); -long __ovld atom_min(volatile __local long *p, long val); -ulong __ovld atom_min(volatile __local ulong *p, ulong val); +long __ovld atom_min(volatile __global long *, long); +ulong __ovld atom_min(volatile __global ulong *, ulong); +long __ovld atom_min(volatile __local long *, long); +ulong __ovld atom_min(volatile __local ulong *, ulong); #endif /** @@ -13112,29 +13112,29 @@ ulong __ovld atom_min(volatile __local ulong *p, ulong val); * location pointed by p. The function * returns old. 
*/ -int __ovld atomic_max(volatile __global int *p, int val); -uint __ovld atomic_max(volatile __global uint *p, uint val); -int __ovld atomic_max(volatile __local int *p, int val); -uint __ovld atomic_max(volatile __local uint *p, uint val); +int __ovld atomic_max(volatile __global int *, int); +uint __ovld atomic_max(volatile __global uint *, uint); +int __ovld atomic_max(volatile __local int *, int); +uint __ovld atomic_max(volatile __local uint *, uint); #ifdef __OPENCL_CPP_VERSION__ -int __ovld atomic_max(volatile int *p, int val); -uint __ovld atomic_max(volatile uint *p, uint val); +int __ovld atomic_max(volatile int *, int); +uint __ovld atomic_max(volatile uint *, uint); #endif #if defined(cl_khr_global_int32_extended_atomics) -int __ovld atom_max(volatile __global int *p, int val); -uint __ovld atom_max(volatile __global uint *p, uint val); +int __ovld atom_max(volatile __global int *, int); +uint __ovld atom_max(volatile __global uint *, uint); #endif #if defined(cl_khr_local_int32_extended_atomics) -int __ovld atom_max(volatile __local int *p, int val); -uint __ovld atom_max(volatile __local uint *p, uint val); +int __ovld atom_max(volatile __local int *, int); +uint __ovld atom_max(volatile __local uint *, uint); #endif #if defined(cl_khr_int64_extended_atomics) -long __ovld atom_max(volatile __global long *p, long val); -ulong __ovld atom_max(volatile __global ulong *p, ulong val); -long __ovld atom_max(volatile __local long *p, long val); -ulong __ovld atom_max(volatile __local ulong *p, ulong val); +long __ovld atom_max(volatile __global long *, long); +ulong __ovld atom_max(volatile __global ulong *, ulong); +long __ovld atom_max(volatile __local long *, long); +ulong __ovld atom_max(volatile __local ulong *, ulong); #endif /** @@ -13143,29 +13143,29 @@ ulong __ovld atom_max(volatile __local ulong *p, ulong val); * (old & val) and store result at location * pointed by p. The function returns old. 
*/ -int __ovld atomic_and(volatile __global int *p, int val); -uint __ovld atomic_and(volatile __global uint *p, uint val); -int __ovld atomic_and(volatile __local int *p, int val); -uint __ovld atomic_and(volatile __local uint *p, uint val); +int __ovld atomic_and(volatile __global int *, int); +uint __ovld atomic_and(volatile __global uint *, uint); +int __ovld atomic_and(volatile __local int *, int); +uint __ovld atomic_and(volatile __local uint *, uint); #ifdef __OPENCL_CPP_VERSION__ -int __ovld atomic_and(volatile int *p, int val); -uint __ovld atomic_and(volatile uint *p, uint val); +int __ovld atomic_and(volatile int *, int); +uint __ovld atomic_and(volatile uint *, uint); #endif #if defined(cl_khr_global_int32_extended_atomics) -int __ovld atom_and(volatile __global int *p, int val); -uint __ovld atom_and(volatile __global uint *p, uint val); +int __ovld atom_and(volatile __global int *, int); +uint __ovld atom_and(volatile __global uint *, uint); #endif #if defined(cl_khr_local_int32_extended_atomics) -int __ovld atom_and(volatile __local int *p, int val); -uint __ovld atom_and(volatile __local uint *p, uint val); +int __ovld atom_and(volatile __local int *, int); +uint __ovld atom_and(volatile __local uint *, uint); #endif #if defined(cl_khr_int64_extended_atomics) -long __ovld atom_and(volatile __global long *p, long val); -ulong __ovld atom_and(volatile __global ulong *p, ulong val); -long __ovld atom_and(volatile __local long *p, long val); -ulong __ovld atom_and(volatile __local ulong *p, ulong val); +long __ovld atom_and(volatile __global long *, long); +ulong __ovld atom_and(volatile __global ulong *, ulong); +long __ovld atom_and(volatile __local long *, long); +ulong __ovld atom_and(volatile __local ulong *, ulong); #endif /** @@ -13174,29 +13174,29 @@ ulong __ovld atom_and(volatile __local ulong *p, ulong val); * (old | val) and store result at location * pointed by p. The function returns old. 
*/ -int __ovld atomic_or(volatile __global int *p, int val); -uint __ovld atomic_or(volatile __global uint *p, uint val); -int __ovld atomic_or(volatile __local int *p, int val); -uint __ovld atomic_or(volatile __local uint *p, uint val); +int __ovld atomic_or(volatile __global int *, int); +uint __ovld atomic_or(volatile __global uint *, uint); +int __ovld atomic_or(volatile __local int *, int); +uint __ovld atomic_or(volatile __local uint *, uint); #ifdef __OPENCL_CPP_VERSION__ -int __ovld atomic_or(volatile int *p, int val); -uint __ovld atomic_or(volatile uint *p, uint val); +int __ovld atomic_or(volatile int *, int); +uint __ovld atomic_or(volatile uint *, uint); #endif #if defined(cl_khr_global_int32_extended_atomics) -int __ovld atom_or(volatile __global int *p, int val); -uint __ovld atom_or(volatile __global uint *p, uint val); +int __ovld atom_or(volatile __global int *, int); +uint __ovld atom_or(volatile __global uint *, uint); #endif #if defined(cl_khr_local_int32_extended_atomics) -int __ovld atom_or(volatile __local int *p, int val); -uint __ovld atom_or(volatile __local uint *p, uint val); +int __ovld atom_or(volatile __local int *, int); +uint __ovld atom_or(volatile __local uint *, uint); #endif #if defined(cl_khr_int64_extended_atomics) -long __ovld atom_or(volatile __global long *p, long val); -ulong __ovld atom_or(volatile __global ulong *p, ulong val); -long __ovld atom_or(volatile __local long *p, long val); -ulong __ovld atom_or(volatile __local ulong *p, ulong val); +long __ovld atom_or(volatile __global long *, long); +ulong __ovld atom_or(volatile __global ulong *, ulong); +long __ovld atom_or(volatile __local long *, long); +ulong __ovld atom_or(volatile __local ulong *, ulong); #endif /** @@ -13205,29 +13205,29 @@ ulong __ovld atom_or(volatile __local ulong *p, ulong val); * (old ^ val) and store result at location * pointed by p. The function returns old. 
*/ -int __ovld atomic_xor(volatile __global int *p, int val); -uint __ovld atomic_xor(volatile __global uint *p, uint val); -int __ovld atomic_xor(volatile __local int *p, int val); -uint __ovld atomic_xor(volatile __local uint *p, uint val); +int __ovld atomic_xor(volatile __global int *, int); +uint __ovld atomic_xor(volatile __global uint *, uint); +int __ovld atomic_xor(volatile __local int *, int); +uint __ovld atomic_xor(volatile __local uint *, uint); #ifdef __OPENCL_CPP_VERSION__ -int __ovld atomic_xor(volatile int *p, int val); -uint __ovld atomic_xor(volatile uint *p, uint val); +int __ovld atomic_xor(volatile int *, int); +uint __ovld atomic_xor(volatile uint *, uint); #endif #if defined(cl_khr_global_int32_extended_atomics) -int __ovld atom_xor(volatile __global int *p, int val); -uint __ovld atom_xor(volatile __global uint *p, uint val); +int __ovld atom_xor(volatile __global int *, int); +uint __ovld atom_xor(volatile __global uint *, uint); #endif #if defined(cl_khr_local_int32_extended_atomics) -int __ovld atom_xor(volatile __local int *p, int val); -uint __ovld atom_xor(volatile __local uint *p, uint val); +int __ovld atom_xor(volatile __local int *, int); +uint __ovld atom_xor(volatile __local uint *, uint); #endif #if defined(cl_khr_int64_extended_atomics) -long __ovld atom_xor(volatile __global long *p, long val); -ulong __ovld atom_xor(volatile __global ulong *p, ulong val); -long __ovld atom_xor(volatile __local long *p, long val); -ulong __ovld atom_xor(volatile __local ulong *p, ulong val); +long __ovld atom_xor(volatile __global long *, long); +ulong __ovld atom_xor(volatile __global ulong *, ulong); +long __ovld atom_xor(volatile __local long *, long); +ulong __ovld atom_xor(volatile __local ulong *, ulong); #endif #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) @@ -15257,13 +15257,17 @@ float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float); uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float); +#ifdef cl_khr_depth_images float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float); +#endif // cl_khr_depth_images float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float); int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float); uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float); +#ifdef cl_khr_depth_images float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float); +#endif // cl_khr_depth_images float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float); int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float); @@ -15281,13 +15285,17 @@ float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float2, float2); uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float2, float2); +#ifdef cl_khr_depth_images float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float2, float2); +#endif // cl_khr_depth_images float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float2, float2); int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float2, float2); uint4 __ovld __purefn read_imageui(read_only image2d_array_t, 
sampler_t, float4, float2, float2); +#ifdef cl_khr_depth_images float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float2, float2); +#endif // cl_khr_depth_images float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float4, float4); int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float4, float4); @@ -15380,9 +15388,11 @@ float4 __ovld __purefn read_imagef(read_write image2d_array_t, int4); int4 __ovld __purefn read_imagei(read_write image2d_array_t, int4); uint4 __ovld __purefn read_imageui(read_write image2d_array_t, int4); +#ifdef cl_khr_3d_image_writes float4 __ovld __purefn read_imagef(read_write image3d_t, int4); int4 __ovld __purefn read_imagei(read_write image3d_t, int4); uint4 __ovld __purefn read_imageui(read_write image3d_t, int4); +#endif // cl_khr_3d_image_writes #ifdef cl_khr_depth_images float __ovld __purefn read_imagef(read_write image2d_depth_t, int2); @@ -15423,9 +15433,11 @@ uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4 float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float); +#ifdef cl_khr_3d_image_writes float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float); int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float); uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float); +#endif // cl_khr_3d_image_writes float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float, float); int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float, float); @@ -15447,9 +15459,11 @@ uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4 float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float2, float2); +#ifdef cl_khr_3d_image_writes float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float4, float4); int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float4, float4); uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float4, float4); +#endif // cl_khr_3d_image_writes #endif //cl_khr_mipmap_image @@ -15457,7 +15471,9 @@ uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, floa #ifdef cl_khr_fp16 half4 __ovld __purefn read_imageh(read_write image1d_t, int); half4 __ovld __purefn read_imageh(read_write image2d_t, int2); +#ifdef cl_khr_3d_image_writes half4 __ovld __purefn read_imageh(read_write image3d_t, int4); +#endif // cl_khr_3d_image_writes half4 __ovld __purefn read_imageh(read_write image1d_array_t, int2); half4 __ovld __purefn read_imageh(read_write image2d_array_t, int4); half4 __ovld __purefn read_imageh(read_write image1d_buffer_t, int); @@ -15727,7 +15743,9 @@ int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t); int __ovld __cnfn get_image_width(read_write image1d_t); int __ovld __cnfn get_image_width(read_write image1d_buffer_t); int __ovld __cnfn get_image_width(read_write image2d_t); +#ifdef cl_khr_3d_image_writes int __ovld __cnfn get_image_width(read_write image3d_t); +#endif // cl_khr_3d_image_writes int __ovld __cnfn get_image_width(read_write image1d_array_t); int __ovld __cnfn get_image_width(read_write image2d_array_t); #ifdef cl_khr_depth_images @@ -15777,7 +15795,9 @@ int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t); #if defined(__opencl_c_read_write_images) int __ovld __cnfn 
get_image_height(read_write image2d_t); +#ifdef cl_khr_3d_image_writes int __ovld __cnfn get_image_height(read_write image3d_t); +#endif // cl_khr_3d_image_writes int __ovld __cnfn get_image_height(read_write image2d_array_t); #ifdef cl_khr_depth_images int __ovld __cnfn get_image_height(read_write image2d_depth_t); @@ -15798,11 +15818,11 @@ int __ovld __cnfn get_image_depth(read_only image3d_t); #ifdef cl_khr_3d_image_writes int __ovld __cnfn get_image_depth(write_only image3d_t); -#endif #if defined(__opencl_c_read_write_images) int __ovld __cnfn get_image_depth(read_write image3d_t); #endif //defined(__opencl_c_read_write_images) +#endif // cl_khr_3d_image_writes // OpenCL Extension v2.0 s9.18 - Mipmaps #if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) @@ -15824,24 +15844,32 @@ int __ovld get_image_num_mip_levels(write_only image3d_t); #if defined(__opencl_c_read_write_images) int __ovld get_image_num_mip_levels(read_write image1d_t); int __ovld get_image_num_mip_levels(read_write image2d_t); +#ifdef cl_khr_3d_image_writes int __ovld get_image_num_mip_levels(read_write image3d_t); +#endif // cl_khr_3d_image_writes #endif //defined(__opencl_c_read_write_images) int __ovld get_image_num_mip_levels(read_only image1d_array_t); int __ovld get_image_num_mip_levels(read_only image2d_array_t); +#ifdef cl_khr_depth_images int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t); int __ovld get_image_num_mip_levels(read_only image2d_depth_t); +#endif // cl_khr_depth_images int __ovld get_image_num_mip_levels(write_only image1d_array_t); int __ovld get_image_num_mip_levels(write_only image2d_array_t); +#ifdef cl_khr_depth_images int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t); int __ovld get_image_num_mip_levels(write_only image2d_depth_t); +#endif // cl_khr_depth_images #if defined(__opencl_c_read_write_images) int __ovld get_image_num_mip_levels(read_write image1d_array_t); int __ovld get_image_num_mip_levels(read_write image2d_array_t); +#ifdef cl_khr_depth_images int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t); int __ovld get_image_num_mip_levels(read_write image2d_depth_t); +#endif // cl_khr_depth_images #endif //defined(__opencl_c_read_write_images) #endif //cl_khr_mipmap_image @@ -15906,7 +15934,9 @@ int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_dept int __ovld __cnfn get_image_channel_data_type(read_write image1d_t); int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t); int __ovld __cnfn get_image_channel_data_type(read_write image2d_t); +#ifdef cl_khr_3d_image_writes int __ovld __cnfn get_image_channel_data_type(read_write image3d_t); +#endif // cl_khr_3d_image_writes int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t); int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t); #ifdef cl_khr_depth_images @@ -15978,7 +16008,9 @@ int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t) int __ovld __cnfn get_image_channel_order(read_write image1d_t); int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t); int __ovld __cnfn get_image_channel_order(read_write image2d_t); +#ifdef cl_khr_3d_image_writes int __ovld __cnfn get_image_channel_order(read_write image3d_t); +#endif // cl_khr_3d_image_writes int __ovld __cnfn get_image_channel_order(read_write image1d_array_t); int __ovld __cnfn get_image_channel_order(read_write image2d_array_t); #ifdef cl_khr_depth_images @@ -16048,10 
+16080,10 @@ int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t); int4 __ovld __cnfn get_image_dim(read_only image3d_t); #ifdef cl_khr_3d_image_writes int4 __ovld __cnfn get_image_dim(write_only image3d_t); -#endif #if defined(__opencl_c_read_write_images) int4 __ovld __cnfn get_image_dim(read_write image3d_t); #endif //defined(__opencl_c_read_write_images) +#endif // cl_khr_3d_image_writes /** * Return the image array size. @@ -16266,9 +16298,9 @@ uint __ovld get_enqueued_num_sub_groups(void); uint __ovld get_sub_group_id(void); uint __ovld get_sub_group_local_id(void); -void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags); +void __ovld __conv sub_group_barrier(cl_mem_fence_flags); #if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) -void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope); +void __ovld __conv sub_group_barrier(cl_mem_fence_flags, memory_scope); #endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) int __ovld __conv sub_group_all(int predicate); @@ -17847,15 +17879,13 @@ intel_sub_group_avc_sic_configure_skc( uint skip_block_partition_type, uint skip_motion_vector_mask, ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment, intel_sub_group_avc_sic_payload_t payload); -intel_sub_group_avc_sic_payload_t __ovld -intel_sub_group_avc_sic_configure_ipe( - uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty, +intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe( + uchar luma_intra_partition_mask, uchar intra_neighbour_availability, uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel, uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels, uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload); -intel_sub_group_avc_sic_payload_t __ovld -intel_sub_group_avc_sic_configure_ipe( - uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty, +intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe( + uchar luma_intra_partition_mask, uchar intra_neighbour_availability, uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel, uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels, ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel, diff --git a/lib/include/ppc_wrappers/emmintrin.h b/lib/include/ppc_wrappers/emmintrin.h index a4c458a41b..0814ea5593 100644 --- a/lib/include/ppc_wrappers/emmintrin.h +++ b/lib/include/ppc_wrappers/emmintrin.h @@ -36,7 +36,7 @@ #ifndef EMMINTRIN_H_ #define EMMINTRIN_H_ -#if defined(__ppc64__) && \ +#if defined(__powerpc64__) && \ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) #include @@ -2262,7 +2262,7 @@ extern __inline __m128d #else #include_next -#endif /* defined(__ppc64__) && +#endif /* defined(__powerpc64__) && \ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */ #endif /* EMMINTRIN_H_ */ diff --git a/lib/include/ppc_wrappers/mm_malloc.h b/lib/include/ppc_wrappers/mm_malloc.h index 65920917f3..7c1e625e44 100644 --- a/lib/include/ppc_wrappers/mm_malloc.h +++ b/lib/include/ppc_wrappers/mm_malloc.h @@ -10,7 +10,7 @@ #ifndef _MM_MALLOC_H_INCLUDED #define _MM_MALLOC_H_INCLUDED -#if defined(__ppc64__) && \ +#if defined(__powerpc64__) && \ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) #include diff --git a/lib/include/ppc_wrappers/mmintrin.h b/lib/include/ppc_wrappers/mmintrin.h index 70e8b81e11..0be3af2b0b 100644 --- 
a/lib/include/ppc_wrappers/mmintrin.h +++ b/lib/include/ppc_wrappers/mmintrin.h @@ -35,7 +35,7 @@ #ifndef _MMINTRIN_H_INCLUDED #define _MMINTRIN_H_INCLUDED -#if defined(__ppc64__) && \ +#if defined(__powerpc64__) && \ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) #include @@ -1447,7 +1447,7 @@ extern __inline __m64 #else #include_next -#endif /* defined(__ppc64__) && +#endif /* defined(__powerpc64__) && \ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */ #endif /* _MMINTRIN_H_INCLUDED */ diff --git a/lib/include/ppc_wrappers/pmmintrin.h b/lib/include/ppc_wrappers/pmmintrin.h index fda39edbaa..db128192ab 100644 --- a/lib/include/ppc_wrappers/pmmintrin.h +++ b/lib/include/ppc_wrappers/pmmintrin.h @@ -39,7 +39,7 @@ #ifndef PMMINTRIN_H_ #define PMMINTRIN_H_ -#if defined(__ppc64__) && \ +#if defined(__powerpc64__) && \ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) /* We need definitions from the SSE2 and SSE header files*/ @@ -139,7 +139,7 @@ extern __inline __m128i #else #include_next -#endif /* defined(__ppc64__) && +#endif /* defined(__powerpc64__) && \ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */ #endif /* PMMINTRIN_H_ */ diff --git a/lib/include/ppc_wrappers/smmintrin.h b/lib/include/ppc_wrappers/smmintrin.h index 6fe6c8a93d..6fe6d2a157 100644 --- a/lib/include/ppc_wrappers/smmintrin.h +++ b/lib/include/ppc_wrappers/smmintrin.h @@ -29,7 +29,7 @@ #ifndef SMMINTRIN_H_ #define SMMINTRIN_H_ -#if defined(__ppc64__) && \ +#if defined(__powerpc64__) && \ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) #include @@ -657,7 +657,7 @@ extern __inline __m128i #else #include_next -#endif /* defined(__ppc64__) && +#endif /* defined(__powerpc64__) && \ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */ #endif /* SMMINTRIN_H_ */ diff --git a/lib/include/ppc_wrappers/tmmintrin.h b/lib/include/ppc_wrappers/tmmintrin.h index 6185ca1e7e..92f08676d2 100644 --- a/lib/include/ppc_wrappers/tmmintrin.h +++ b/lib/include/ppc_wrappers/tmmintrin.h @@ -25,7 +25,7 @@ #ifndef TMMINTRIN_H_ #define TMMINTRIN_H_ -#if defined(__ppc64__) && \ +#if defined(__powerpc64__) && \ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) #include @@ -447,7 +447,7 @@ extern __inline __m64 #else #include_next -#endif /* defined(__ppc64__) && +#endif /* defined(__powerpc64__) && \ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */ #endif /* TMMINTRIN_H_ */ diff --git a/lib/include/ppc_wrappers/xmmintrin.h b/lib/include/ppc_wrappers/xmmintrin.h index ee0032ca15..9dd21b65c2 100644 --- a/lib/include/ppc_wrappers/xmmintrin.h +++ b/lib/include/ppc_wrappers/xmmintrin.h @@ -35,7 +35,7 @@ #ifndef XMMINTRIN_H_ #define XMMINTRIN_H_ -#if defined(__ppc64__) && \ +#if defined(__powerpc64__) && \ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) /* Define four value permute mask */ @@ -1821,7 +1821,7 @@ extern __inline void #else #include_next -#endif /* defined(__ppc64__) && +#endif /* defined(__powerpc64__) && \ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */ #endif /* XMMINTRIN_H_ */ diff --git a/lib/include/prfchiintrin.h b/lib/include/prfchiintrin.h new file mode 100644 index 0000000000..36600b25aa --- /dev/null +++ b/lib/include/prfchiintrin.h @@ -0,0 +1,61 @@ +/*===---- prfchiintrin.h - PREFETCHI intrinsic -----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __PRFCHIINTRIN_H +#define __PRFCHIINTRIN_H + +#ifdef __x86_64__ + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("prefetchi"))) + +/// Loads an instruction sequence containing the specified memory address into +/// all level cache. +/// +/// Note that the effect of this intrinsic is dependent on the processor +/// implementation. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PREFETCHIT0 instruction. +/// +/// \param __P +/// A pointer specifying the memory address to be prefetched. +static __inline__ void __DEFAULT_FN_ATTRS +_m_prefetchit0(volatile const void *__P) { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-qual" + __builtin_ia32_prefetchi((const void *)__P, 3 /* _MM_HINT_T0 */); +#pragma clang diagnostic pop +} + +/// Loads an instruction sequence containing the specified memory address into +/// all but the first-level cache. +/// +/// Note that the effect of this intrinsic is dependent on the processor +/// implementation. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PREFETCHIT1 instruction. +/// +/// \param __P +/// A pointer specifying the memory address to be prefetched. +static __inline__ void __DEFAULT_FN_ATTRS +_m_prefetchit1(volatile const void *__P) { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-qual" + __builtin_ia32_prefetchi((const void *)__P, 2 /* _MM_HINT_T1 */); +#pragma clang diagnostic pop +} +#endif /* __x86_64__ */ +#undef __DEFAULT_FN_ATTRS + +#endif /* __PRFCHWINTRIN_H */ diff --git a/lib/include/raointintrin.h b/lib/include/raointintrin.h new file mode 100644 index 0000000000..d3290eb62a --- /dev/null +++ b/lib/include/raointintrin.h @@ -0,0 +1,203 @@ +/*===----------------------- raointintrin.h - RAOINT ------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86GPRINTRIN_H +#error "Never use directly; include instead." +#endif // __X86GPRINTRIN_H + +#ifndef __RAOINTINTRIN_H +#define __RAOINTINTRIN_H + +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("raoint"))) + +/// Atomically add a 32-bit value at memory operand \a __A and a 32-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AADD instruction. +/// +/// \param __A +/// A pointer to a 32-bit memory location. +/// \param __B +/// A 32-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+31:__A] := MEM[__A+31:__A] + __B[31:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aadd_i32(int *__A, int __B) { + __builtin_ia32_aadd32((int *)__A, __B); +} + +/// Atomically and a 32-bit value at memory operand \a __A and a 32-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. 
It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AAND instruction. +/// +/// \param __A +/// A pointer to a 32-bit memory location. +/// \param __B +/// A 32-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+31:__A] := MEM[__A+31:__A] AND __B[31:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aand_i32(int *__A, int __B) { + __builtin_ia32_aand32((int *)__A, __B); +} + +/// Atomically or a 32-bit value at memory operand \a __A and a 32-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AOR instruction. +/// +/// \param __A +/// A pointer to a 32-bit memory location. +/// \param __B +/// A 32-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+31:__A] := MEM[__A+31:__A] OR __B[31:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aor_i32(int *__A, int __B) { + __builtin_ia32_aor32((int *)__A, __B); +} + +/// Atomically xor a 32-bit value at memory operand \a __A and a 32-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AXOR instruction. +/// +/// \param __A +/// A pointer to a 32-bit memory location. +/// \param __B +/// A 32-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+31:__A] := MEM[__A+31:__A] XOR __B[31:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _axor_i32(int *__A, int __B) { + __builtin_ia32_axor32((int *)__A, __B); +} + +#ifdef __x86_64__ +/// Atomically add a 64-bit value at memory operand \a __A and a 64-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AADD instruction. +/// +/// \param __A +/// A pointer to a 64-bit memory location. +/// \param __B +/// A 64-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+63:__A] := MEM[__A+63:__A] + __B[63:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aadd_i64(long long *__A, + long long __B) { + __builtin_ia32_aadd64((long long *)__A, __B); +} + +/// Atomically and a 64-bit value at memory operand \a __A and a 64-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AAND instruction. +/// +/// \param __A +/// A pointer to a 64-bit memory location. +/// \param __B +/// A 64-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+63:__A] := MEM[__A+63:__A] AND __B[63:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aand_i64(long long *__A, + long long __B) { + __builtin_ia32_aand64((long long *)__A, __B); +} + +/// Atomically or a 64-bit value at memory operand \a __A and a 64-bit \a __B, +/// and store the result to the same memory location. 
+/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AOR instruction. +/// +/// \param __A +/// A pointer to a 64-bit memory location. +/// \param __B +/// A 64-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+63:__A] := MEM[__A+63:__A] OR __B[63:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aor_i64(long long *__A, + long long __B) { + __builtin_ia32_aor64((long long *)__A, __B); +} + +/// Atomically xor a 64-bit value at memory operand \a __A and a 64-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AXOR instruction. +/// +/// \param __A +/// A pointer to a 64-bit memory location. +/// \param __B +/// A 64-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+63:__A] := MEM[__A+63:__A] XOR __B[63:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _axor_i64(long long *__A, + long long __B) { + __builtin_ia32_axor64((long long *)__A, __B); +} +#endif // __x86_64__ + +#undef __DEFAULT_FN_ATTRS +#endif // __RAOINTINTRIN_H diff --git a/lib/include/riscv_vector.h b/lib/include/riscv_vector.h index cac2b2de1e..2a9598e39c 100644 --- a/lib/include/riscv_vector.h +++ b/lib/include/riscv_vector.h @@ -25,6 +25,8 @@ extern "C" { #pragma clang riscv intrinsic vector +#define vlenb() __builtin_rvv_vlenb() + enum RVV_CSR { RVV_VSTART = 0, RVV_VXSAT, @@ -70,7 +72,6 @@ void vwrite_csr(enum RVV_CSR __csr, unsigned long __value) { } } -#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5) #define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6) #define vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7) #define vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0) @@ -78,25 +79,28 @@ void vwrite_csr(enum RVV_CSR __csr, unsigned long __value) { #define vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2) #define vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3) -#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6) #define vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7) #define vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0) #define vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1) #define vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2) #define vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3) -#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7) #define vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0) #define vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1) #define vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2) #define vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3) +#if __riscv_v_elen >= 64 +#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5) +#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6) +#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7) + #define vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0) #define vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1) #define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2) #define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 
3, 3) +#endif -#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5) #define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6) #define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7) #define vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0) @@ -104,23 +108,28 @@ void vwrite_csr(enum RVV_CSR __csr, unsigned long __value) { #define vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2) #define vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3) -#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6) #define vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7) #define vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0) #define vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1) #define vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2) #define vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3) -#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7) #define vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0) #define vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1) #define vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2) #define vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3) +#if __riscv_v_elen >= 64 +#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5) +#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6) +#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7) + #define vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0) #define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1) #define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2) #define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3) +#endif + typedef __rvv_bool64_t vbool64_t; typedef __rvv_bool32_t vbool32_t; typedef __rvv_bool16_t vbool16_t; diff --git a/lib/include/smmintrin.h b/lib/include/smmintrin.h index 46fb7bcd4e..2111c24f31 100644 --- a/lib/include/smmintrin.h +++ b/lib/include/smmintrin.h @@ -818,7 +818,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1, /// parameter, is copied to the result. /// \param N /// Specifies which bits from operand \a Y will be copied, which bits in the -/// result they will be be copied to, and which bits in the result will be +/// result they will be copied to, and which bits in the result will be /// cleared. The following assignments are made: \n /// Bits [7:6] specify the bits to copy from operand \a Y: \n /// 00: Selects bits [31:0] from operand \a Y. \n diff --git a/lib/include/stdarg.h b/lib/include/stdarg.h index 0bc39408c1..ba978721f1 100644 --- a/lib/include/stdarg.h +++ b/lib/include/stdarg.h @@ -8,13 +8,30 @@ */ #ifndef __STDARG_H -#define __STDARG_H +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST +typedef __builtin_va_list __gnuc_va_list; +#endif + +#ifdef __need___va_list +#undef __need___va_list +#else +#define __STDARG_H #ifndef _VA_LIST typedef __builtin_va_list va_list; #define _VA_LIST #endif + +/* FIXME: This is using the placeholder dates Clang produces for these macros + in C2x mode; switch to the correct values once they've been published. */ +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L +/* C2x does not require the second parameter for va_start. */ +#define va_start(ap, ...) __builtin_va_start(ap, 0) +#else +/* Versions before C2x do require the second parameter. 
*/ #define va_start(ap, param) __builtin_va_start(ap, param) +#endif #define va_end(ap) __builtin_va_end(ap) #define va_arg(ap, type) __builtin_va_arg(ap, type) @@ -23,13 +40,12 @@ typedef __builtin_va_list va_list; */ #define __va_copy(d,s) __builtin_va_copy(d,s) -#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__) +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ + (defined(__cplusplus) && __cplusplus >= 201103L) || \ + !defined(__STRICT_ANSI__) #define va_copy(dest, src) __builtin_va_copy(dest, src) #endif -#ifndef __GNUC_VA_LIST -#define __GNUC_VA_LIST 1 -typedef __builtin_va_list __gnuc_va_list; -#endif - #endif /* __STDARG_H */ + +#endif /* not __STDARG_H */ diff --git a/lib/include/stdatomic.h b/lib/include/stdatomic.h index 318c7ca56e..0f893beea6 100644 --- a/lib/include/stdatomic.h +++ b/lib/include/stdatomic.h @@ -15,10 +15,12 @@ * * Exclude the MSVC path as well as the MSVC header as of the 14.31.30818 * explicitly disallows `stdatomic.h` in the C mode via an `#error`. Fallback - * to the clang resource header until that is fully supported. + * to the clang resource header until that is fully supported. The + * `stdatomic.h` header requires C++ 23 or newer. */ #if __STDC_HOSTED__ && \ - __has_include_next() && !(defined(_MSC_VER) && !defined(__cplusplus)) + __has_include_next() && \ + (!defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 202002L)) # include_next #else @@ -45,7 +47,8 @@ extern "C" { /* 7.17.2 Initialization */ #define ATOMIC_VAR_INIT(value) (value) -#if (__STDC_VERSION__ >= 201710L || __cplusplus >= 202002L) && \ +#if ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201710L) || \ + (defined(__cplusplus) && __cplusplus >= 202002L)) && \ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS) /* ATOMIC_VAR_INIT was deprecated in C17 and C++20. */ #pragma clang deprecated(ATOMIC_VAR_INIT) diff --git a/lib/include/stdbool.h b/lib/include/stdbool.h index f0e588532e..9406aab0ca 100644 --- a/lib/include/stdbool.h +++ b/lib/include/stdbool.h @@ -12,7 +12,7 @@ #define __bool_true_false_are_defined 1 -#if __STDC_VERSION__ > 201710L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L /* FIXME: We should be issuing a deprecation warning here, but cannot yet due * to system headers which include this header file unconditionally. */ @@ -23,7 +23,7 @@ #elif defined(__GNUC__) && !defined(__STRICT_ANSI__) /* Define _Bool as a GNU extension. */ #define _Bool bool -#if __cplusplus < 201103L +#if defined(__cplusplus) && __cplusplus < 201103L /* For C++98, define bool, false, true as a GNU extension. */ #define bool bool #define false false diff --git a/lib/include/stddef.h b/lib/include/stddef.h index a15d21b553..42815176dc 100644 --- a/lib/include/stddef.h +++ b/lib/include/stddef.h @@ -97,8 +97,15 @@ using ::std::nullptr_t; #undef __need_NULL #endif /* defined(__need_NULL) */ +/* FIXME: This is using the placeholder dates Clang produces for these macros + in C2x mode; switch to the correct values once they've been published. 
*/ +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L +typedef typeof(nullptr) nullptr_t; +#endif /* defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L */ + #if defined(__need_STDDEF_H_misc) -#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \ + (defined(__cplusplus) && __cplusplus >= 201103L) #include "__stddef_max_align_t.h" #endif #define offsetof(t, d) __builtin_offsetof(t, d) diff --git a/lib/include/stdint.h b/lib/include/stdint.h index 4790c25a27..a47e91be18 100644 --- a/lib/include/stdint.h +++ b/lib/include/stdint.h @@ -96,13 +96,21 @@ typedef __INT64_TYPE__ int64_t; # endif /* __int8_t_defined */ typedef __UINT64_TYPE__ uint64_t; +# undef __int_least64_t # define __int_least64_t int64_t +# undef __uint_least64_t # define __uint_least64_t uint64_t +# undef __int_least32_t # define __int_least32_t int64_t +# undef __uint_least32_t # define __uint_least32_t uint64_t +# undef __int_least16_t # define __int_least16_t int64_t +# undef __uint_least16_t # define __uint_least16_t uint64_t +# undef __int_least8_t # define __int_least8_t int64_t +# undef __uint_least8_t # define __uint_least8_t uint64_t #endif /* __INT64_TYPE__ */ @@ -120,11 +128,17 @@ typedef int56_t int_least56_t; typedef uint56_t uint_least56_t; typedef int56_t int_fast56_t; typedef uint56_t uint_fast56_t; +# undef __int_least32_t # define __int_least32_t int56_t +# undef __uint_least32_t # define __uint_least32_t uint56_t +# undef __int_least16_t # define __int_least16_t int56_t +# undef __uint_least16_t # define __uint_least16_t uint56_t +# undef __int_least8_t # define __int_least8_t int56_t +# undef __uint_least8_t # define __uint_least8_t uint56_t #endif /* __INT56_TYPE__ */ @@ -136,11 +150,17 @@ typedef int48_t int_least48_t; typedef uint48_t uint_least48_t; typedef int48_t int_fast48_t; typedef uint48_t uint_fast48_t; +# undef __int_least32_t # define __int_least32_t int48_t +# undef __uint_least32_t # define __uint_least32_t uint48_t +# undef __int_least16_t # define __int_least16_t int48_t +# undef __uint_least16_t # define __uint_least16_t uint48_t +# undef __int_least8_t # define __int_least8_t int48_t +# undef __uint_least8_t # define __uint_least8_t uint48_t #endif /* __INT48_TYPE__ */ @@ -152,11 +172,17 @@ typedef int40_t int_least40_t; typedef uint40_t uint_least40_t; typedef int40_t int_fast40_t; typedef uint40_t uint_fast40_t; +# undef __int_least32_t # define __int_least32_t int40_t +# undef __uint_least32_t # define __uint_least32_t uint40_t +# undef __int_least16_t # define __int_least16_t int40_t +# undef __uint_least16_t # define __uint_least16_t uint40_t +# undef __int_least8_t # define __int_least8_t int40_t +# undef __uint_least8_t # define __uint_least8_t uint40_t #endif /* __INT40_TYPE__ */ @@ -172,11 +198,17 @@ typedef __INT32_TYPE__ int32_t; typedef __UINT32_TYPE__ uint32_t; # endif /* __uint32_t_defined */ +# undef __int_least32_t # define __int_least32_t int32_t +# undef __uint_least32_t # define __uint_least32_t uint32_t +# undef __int_least16_t # define __int_least16_t int32_t +# undef __uint_least16_t # define __uint_least16_t uint32_t +# undef __int_least8_t # define __int_least8_t int32_t +# undef __uint_least8_t # define __uint_least8_t uint32_t #endif /* __INT32_TYPE__ */ @@ -194,9 +226,13 @@ typedef int24_t int_least24_t; typedef uint24_t uint_least24_t; typedef int24_t int_fast24_t; typedef uint24_t uint_fast24_t; +# undef __int_least16_t # define __int_least16_t int24_t +# undef 
__uint_least16_t # define __uint_least16_t uint24_t +# undef __int_least8_t # define __int_least8_t int24_t +# undef __uint_least8_t # define __uint_least8_t uint24_t #endif /* __INT24_TYPE__ */ @@ -205,9 +241,13 @@ typedef uint24_t uint_fast24_t; typedef __INT16_TYPE__ int16_t; #endif /* __int8_t_defined */ typedef __UINT16_TYPE__ uint16_t; +# undef __int_least16_t # define __int_least16_t int16_t +# undef __uint_least16_t # define __uint_least16_t uint16_t +# undef __int_least8_t # define __int_least8_t int16_t +# undef __uint_least8_t # define __uint_least8_t uint16_t #endif /* __INT16_TYPE__ */ @@ -224,7 +264,9 @@ typedef __uint_least16_t uint_fast16_t; typedef __INT8_TYPE__ int8_t; #endif /* __int8_t_defined */ typedef __UINT8_TYPE__ uint8_t; +# undef __int_least8_t # define __int_least8_t int8_t +# undef __uint_least8_t # define __uint_least8_t uint8_t #endif /* __INT8_TYPE__ */ @@ -285,16 +327,15 @@ typedef __UINTMAX_TYPE__ uintmax_t; #ifdef __INT64_TYPE__ +# undef __int64_c_suffix +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix # ifdef __INT64_C_SUFFIX__ # define __int64_c_suffix __INT64_C_SUFFIX__ # define __int32_c_suffix __INT64_C_SUFFIX__ # define __int16_c_suffix __INT64_C_SUFFIX__ # define __int8_c_suffix __INT64_C_SUFFIX__ -# else -# undef __int64_c_suffix -# undef __int32_c_suffix -# undef __int16_c_suffix -# undef __int8_c_suffix # endif /* __INT64_C_SUFFIX__ */ #endif /* __INT64_TYPE__ */ @@ -310,6 +351,9 @@ typedef __UINTMAX_TYPE__ uintmax_t; #ifdef __INT56_TYPE__ +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix # ifdef __INT56_C_SUFFIX__ # define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__) # define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__) @@ -319,14 +363,14 @@ typedef __UINTMAX_TYPE__ uintmax_t; # else # define INT56_C(v) v # define UINT56_C(v) v ## U -# undef __int32_c_suffix -# undef __int16_c_suffix -# undef __int8_c_suffix # endif /* __INT56_C_SUFFIX__ */ #endif /* __INT56_TYPE__ */ #ifdef __INT48_TYPE__ +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix # ifdef __INT48_C_SUFFIX__ # define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__) # define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__) @@ -336,14 +380,14 @@ typedef __UINTMAX_TYPE__ uintmax_t; # else # define INT48_C(v) v # define UINT48_C(v) v ## U -# undef __int32_c_suffix -# undef __int16_c_suffix -# undef __int8_c_suffix # endif /* __INT48_C_SUFFIX__ */ #endif /* __INT48_TYPE__ */ #ifdef __INT40_TYPE__ +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix # ifdef __INT40_C_SUFFIX__ # define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__) # define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__) @@ -353,22 +397,18 @@ typedef __UINTMAX_TYPE__ uintmax_t; # else # define INT40_C(v) v # define UINT40_C(v) v ## U -# undef __int32_c_suffix -# undef __int16_c_suffix -# undef __int8_c_suffix # endif /* __INT40_C_SUFFIX__ */ #endif /* __INT40_TYPE__ */ #ifdef __INT32_TYPE__ +# undef __int32_c_suffix +# undef __int16_c_suffix +# undef __int8_c_suffix # ifdef __INT32_C_SUFFIX__ # define __int32_c_suffix __INT32_C_SUFFIX__ # define __int16_c_suffix __INT32_C_SUFFIX__ # define __int8_c_suffix __INT32_C_SUFFIX__ -#else -# undef __int32_c_suffix -# undef __int16_c_suffix -# undef __int8_c_suffix # endif /* __INT32_C_SUFFIX__ */ #endif /* __INT32_TYPE__ */ @@ -384,6 +424,8 @@ typedef __UINTMAX_TYPE__ uintmax_t; #ifdef __INT24_TYPE__ +# undef __int16_c_suffix +# undef __int8_c_suffix # ifdef __INT24_C_SUFFIX__ # define INT24_C(v) 
__int_c(v, __INT24_C_SUFFIX__) # define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__) @@ -392,19 +434,16 @@ typedef __UINTMAX_TYPE__ uintmax_t; # else # define INT24_C(v) v # define UINT24_C(v) v ## U -# undef __int16_c_suffix -# undef __int8_c_suffix # endif /* __INT24_C_SUFFIX__ */ #endif /* __INT24_TYPE__ */ #ifdef __INT16_TYPE__ +# undef __int16_c_suffix +# undef __int8_c_suffix # ifdef __INT16_C_SUFFIX__ # define __int16_c_suffix __INT16_C_SUFFIX__ # define __int8_c_suffix __INT16_C_SUFFIX__ -#else -# undef __int16_c_suffix -# undef __int8_c_suffix # endif /* __INT16_C_SUFFIX__ */ #endif /* __INT16_TYPE__ */ @@ -420,10 +459,9 @@ typedef __UINTMAX_TYPE__ uintmax_t; #ifdef __INT8_TYPE__ +# undef __int8_c_suffix # ifdef __INT8_C_SUFFIX__ # define __int8_c_suffix __INT8_C_SUFFIX__ -#else -# undef __int8_c_suffix # endif /* __INT8_C_SUFFIX__ */ #endif /* __INT8_TYPE__ */ @@ -463,27 +501,39 @@ typedef __UINTMAX_TYPE__ uintmax_t; # define UINT64_MAX UINT64_C(18446744073709551615) /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT64_WIDTH 64 # define INT64_WIDTH UINT64_WIDTH # define __UINT_LEAST64_WIDTH UINT64_WIDTH +# undef __UINT_LEAST32_WIDTH # define __UINT_LEAST32_WIDTH UINT64_WIDTH +# undef __UINT_LEAST16_WIDTH # define __UINT_LEAST16_WIDTH UINT64_WIDTH +# undef __UINT_LEAST8_MAX # define __UINT_LEAST8_MAX UINT64_MAX #endif /* __STDC_VERSION__ */ # define __INT_LEAST64_MIN INT64_MIN # define __INT_LEAST64_MAX INT64_MAX # define __UINT_LEAST64_MAX UINT64_MAX +# undef __INT_LEAST32_MIN # define __INT_LEAST32_MIN INT64_MIN +# undef __INT_LEAST32_MAX # define __INT_LEAST32_MAX INT64_MAX +# undef __UINT_LEAST32_MAX # define __UINT_LEAST32_MAX UINT64_MAX +# undef __INT_LEAST16_MIN # define __INT_LEAST16_MIN INT64_MIN +# undef __INT_LEAST16_MAX # define __INT_LEAST16_MAX INT64_MAX +# undef __UINT_LEAST16_MAX # define __UINT_LEAST16_MAX UINT64_MAX +# undef __INT_LEAST8_MIN # define __INT_LEAST8_MIN INT64_MIN +# undef __INT_LEAST8_MAX # define __INT_LEAST8_MAX INT64_MAX +# undef __UINT_LEAST8_MAX # define __UINT_LEAST8_MAX UINT64_MAX #endif /* __INT64_TYPE__ */ @@ -497,7 +547,7 @@ typedef __UINTMAX_TYPE__ uintmax_t; /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. 
*/ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT_LEAST64_WIDTH __UINT_LEAST64_WIDTH # define INT_LEAST64_WIDTH UINT_LEAST64_WIDTH # define UINT_FAST64_WIDTH __UINT_LEAST64_WIDTH @@ -517,27 +567,39 @@ typedef __UINTMAX_TYPE__ uintmax_t; # define INT_FAST56_MAX INT56_MAX # define UINT_FAST56_MAX UINT56_MAX +# undef __INT_LEAST32_MIN # define __INT_LEAST32_MIN INT56_MIN +# undef __INT_LEAST32_MAX # define __INT_LEAST32_MAX INT56_MAX +# undef __UINT_LEAST32_MAX # define __UINT_LEAST32_MAX UINT56_MAX +# undef __INT_LEAST16_MIN # define __INT_LEAST16_MIN INT56_MIN +# undef __INT_LEAST16_MAX # define __INT_LEAST16_MAX INT56_MAX +# undef __UINT_LEAST16_MAX # define __UINT_LEAST16_MAX UINT56_MAX +# undef __INT_LEAST8_MIN # define __INT_LEAST8_MIN INT56_MIN +# undef __INT_LEAST8_MAX # define __INT_LEAST8_MAX INT56_MAX +# undef __UINT_LEAST8_MAX # define __UINT_LEAST8_MAX UINT56_MAX /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT56_WIDTH 56 # define INT56_WIDTH UINT56_WIDTH # define UINT_LEAST56_WIDTH UINT56_WIDTH # define INT_LEAST56_WIDTH UINT_LEAST56_WIDTH # define UINT_FAST56_WIDTH UINT56_WIDTH # define INT_FAST56_WIDTH UINT_FAST56_WIDTH +# undef __UINT_LEAST32_WIDTH # define __UINT_LEAST32_WIDTH UINT56_WIDTH +# undef __UINT_LEAST16_WIDTH # define __UINT_LEAST16_WIDTH UINT56_WIDTH +# undef __UINT_LEAST8_WIDTH # define __UINT_LEAST8_WIDTH UINT56_WIDTH #endif /* __STDC_VERSION__ */ #endif /* __INT56_TYPE__ */ @@ -554,27 +616,39 @@ typedef __UINTMAX_TYPE__ uintmax_t; # define INT_FAST48_MAX INT48_MAX # define UINT_FAST48_MAX UINT48_MAX +# undef __INT_LEAST32_MIN # define __INT_LEAST32_MIN INT48_MIN +# undef __INT_LEAST32_MAX # define __INT_LEAST32_MAX INT48_MAX +# undef __UINT_LEAST32_MAX # define __UINT_LEAST32_MAX UINT48_MAX +# undef __INT_LEAST16_MIN # define __INT_LEAST16_MIN INT48_MIN +# undef __INT_LEAST16_MAX # define __INT_LEAST16_MAX INT48_MAX +# undef __UINT_LEAST16_MAX # define __UINT_LEAST16_MAX UINT48_MAX +# undef __INT_LEAST8_MIN # define __INT_LEAST8_MIN INT48_MIN +# undef __INT_LEAST8_MAX # define __INT_LEAST8_MAX INT48_MAX +# undef __UINT_LEAST8_MAX # define __UINT_LEAST8_MAX UINT48_MAX /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. 
*/ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L #define UINT48_WIDTH 48 #define INT48_WIDTH UINT48_WIDTH #define UINT_LEAST48_WIDTH UINT48_WIDTH #define INT_LEAST48_WIDTH UINT_LEAST48_WIDTH #define UINT_FAST48_WIDTH UINT48_WIDTH #define INT_FAST48_WIDTH UINT_FAST48_WIDTH +#undef __UINT_LEAST32_WIDTH #define __UINT_LEAST32_WIDTH UINT48_WIDTH +# undef __UINT_LEAST16_WIDTH #define __UINT_LEAST16_WIDTH UINT48_WIDTH +# undef __UINT_LEAST8_WIDTH #define __UINT_LEAST8_WIDTH UINT48_WIDTH #endif /* __STDC_VERSION__ */ #endif /* __INT48_TYPE__ */ @@ -591,27 +665,39 @@ typedef __UINTMAX_TYPE__ uintmax_t; # define INT_FAST40_MAX INT40_MAX # define UINT_FAST40_MAX UINT40_MAX +# undef __INT_LEAST32_MIN # define __INT_LEAST32_MIN INT40_MIN +# undef __INT_LEAST32_MAX # define __INT_LEAST32_MAX INT40_MAX +# undef __UINT_LEAST32_MAX # define __UINT_LEAST32_MAX UINT40_MAX +# undef __INT_LEAST16_MIN # define __INT_LEAST16_MIN INT40_MIN +# undef __INT_LEAST16_MAX # define __INT_LEAST16_MAX INT40_MAX +# undef __UINT_LEAST16_MAX # define __UINT_LEAST16_MAX UINT40_MAX +# undef __INT_LEAST8_MIN # define __INT_LEAST8_MIN INT40_MIN +# undef __INT_LEAST8_MAX # define __INT_LEAST8_MAX INT40_MAX +# undef __UINT_LEAST8_MAX # define __UINT_LEAST8_MAX UINT40_MAX /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT40_WIDTH 40 # define INT40_WIDTH UINT40_WIDTH # define UINT_LEAST40_WIDTH UINT40_WIDTH # define INT_LEAST40_WIDTH UINT_LEAST40_WIDTH # define UINT_FAST40_WIDTH UINT40_WIDTH # define INT_FAST40_WIDTH UINT_FAST40_WIDTH +# undef __UINT_LEAST32_WIDTH # define __UINT_LEAST32_WIDTH UINT40_WIDTH +# undef __UINT_LEAST16_WIDTH # define __UINT_LEAST16_WIDTH UINT40_WIDTH +# undef __UINT_LEAST8_WIDTH # define __UINT_LEAST8_WIDTH UINT40_WIDTH #endif /* __STDC_VERSION__ */ #endif /* __INT40_TYPE__ */ @@ -622,23 +708,35 @@ typedef __UINTMAX_TYPE__ uintmax_t; # define INT32_MIN (-INT32_C(2147483647)-1) # define UINT32_MAX UINT32_C(4294967295) +# undef __INT_LEAST32_MIN # define __INT_LEAST32_MIN INT32_MIN +# undef __INT_LEAST32_MAX # define __INT_LEAST32_MAX INT32_MAX +# undef __UINT_LEAST32_MAX # define __UINT_LEAST32_MAX UINT32_MAX +# undef __INT_LEAST16_MIN # define __INT_LEAST16_MIN INT32_MIN +# undef __INT_LEAST16_MAX # define __INT_LEAST16_MAX INT32_MAX +# undef __UINT_LEAST16_MAX # define __UINT_LEAST16_MAX UINT32_MAX +# undef __INT_LEAST8_MIN # define __INT_LEAST8_MIN INT32_MIN +# undef __INT_LEAST8_MAX # define __INT_LEAST8_MAX INT32_MAX +# undef __UINT_LEAST8_MAX # define __UINT_LEAST8_MAX UINT32_MAX /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. 
*/ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT32_WIDTH 32 # define INT32_WIDTH UINT32_WIDTH +# undef __UINT_LEAST32_WIDTH # define __UINT_LEAST32_WIDTH UINT32_WIDTH +# undef __UINT_LEAST16_WIDTH # define __UINT_LEAST16_WIDTH UINT32_WIDTH +# undef __UINT_LEAST8_WIDTH # define __UINT_LEAST8_WIDTH UINT32_WIDTH #endif /* __STDC_VERSION__ */ #endif /* __INT32_TYPE__ */ @@ -653,7 +751,7 @@ typedef __UINTMAX_TYPE__ uintmax_t; /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT_LEAST32_WIDTH __UINT_LEAST32_WIDTH # define INT_LEAST32_WIDTH UINT_LEAST32_WIDTH # define UINT_FAST32_WIDTH __UINT_LEAST32_WIDTH @@ -673,23 +771,31 @@ typedef __UINTMAX_TYPE__ uintmax_t; # define INT_FAST24_MAX INT24_MAX # define UINT_FAST24_MAX UINT24_MAX +# undef __INT_LEAST16_MIN # define __INT_LEAST16_MIN INT24_MIN +# undef __INT_LEAST16_MAX # define __INT_LEAST16_MAX INT24_MAX +# undef __UINT_LEAST16_MAX # define __UINT_LEAST16_MAX UINT24_MAX +# undef __INT_LEAST8_MIN # define __INT_LEAST8_MIN INT24_MIN +# undef __INT_LEAST8_MAX # define __INT_LEAST8_MAX INT24_MAX +# undef __UINT_LEAST8_MAX # define __UINT_LEAST8_MAX UINT24_MAX /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT24_WIDTH 24 # define INT24_WIDTH UINT24_WIDTH # define UINT_LEAST24_WIDTH UINT24_WIDTH # define INT_LEAST24_WIDTH UINT_LEAST24_WIDTH # define UINT_FAST24_WIDTH UINT24_WIDTH # define INT_FAST24_WIDTH UINT_FAST24_WIDTH +# undef __UINT_LEAST16_WIDTH # define __UINT_LEAST16_WIDTH UINT24_WIDTH +# undef __UINT_LEAST8_WIDTH # define __UINT_LEAST8_WIDTH UINT24_WIDTH #endif /* __STDC_VERSION__ */ #endif /* __INT24_TYPE__ */ @@ -700,19 +806,27 @@ typedef __UINTMAX_TYPE__ uintmax_t; #define INT16_MIN (-INT16_C(32767)-1) #define UINT16_MAX UINT16_C(65535) +# undef __INT_LEAST16_MIN # define __INT_LEAST16_MIN INT16_MIN +# undef __INT_LEAST16_MAX # define __INT_LEAST16_MAX INT16_MAX +# undef __UINT_LEAST16_MAX # define __UINT_LEAST16_MAX UINT16_MAX +# undef __INT_LEAST8_MIN # define __INT_LEAST8_MIN INT16_MIN +# undef __INT_LEAST8_MAX # define __INT_LEAST8_MAX INT16_MAX +# undef __UINT_LEAST8_MAX # define __UINT_LEAST8_MAX UINT16_MAX /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT16_WIDTH 16 # define INT16_WIDTH UINT16_WIDTH +# undef __UINT_LEAST16_WIDTH # define __UINT_LEAST16_WIDTH UINT16_WIDTH +# undef __UINT_LEAST8_WIDTH # define __UINT_LEAST8_WIDTH UINT16_WIDTH #endif /* __STDC_VERSION__ */ #endif /* __INT16_TYPE__ */ @@ -727,7 +841,7 @@ typedef __UINTMAX_TYPE__ uintmax_t; /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. 
*/ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT_LEAST16_WIDTH __UINT_LEAST16_WIDTH # define INT_LEAST16_WIDTH UINT_LEAST16_WIDTH # define UINT_FAST16_WIDTH __UINT_LEAST16_WIDTH @@ -741,15 +855,19 @@ typedef __UINTMAX_TYPE__ uintmax_t; # define INT8_MIN (-INT8_C(127)-1) # define UINT8_MAX UINT8_C(255) +# undef __INT_LEAST8_MIN # define __INT_LEAST8_MIN INT8_MIN +# undef __INT_LEAST8_MAX # define __INT_LEAST8_MAX INT8_MAX +# undef __UINT_LEAST8_MAX # define __UINT_LEAST8_MAX UINT8_MAX /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT8_WIDTH 8 # define INT8_WIDTH UINT8_WIDTH +# undef __UINT_LEAST8_WIDTH # define __UINT_LEAST8_WIDTH UINT8_WIDTH #endif /* __STDC_VERSION__ */ #endif /* __INT8_TYPE__ */ @@ -764,7 +882,7 @@ typedef __UINTMAX_TYPE__ uintmax_t; /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L # define UINT_LEAST8_WIDTH __UINT_LEAST8_WIDTH # define INT_LEAST8_WIDTH UINT_LEAST8_WIDTH # define UINT_FAST8_WIDTH __UINT_LEAST8_WIDTH @@ -792,7 +910,7 @@ typedef __UINTMAX_TYPE__ uintmax_t; /* C2x 7.20.2.4 Width of integer types capable of holding object pointers. */ /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L /* NB: The C standard requires that these be the same value, but the compiler exposes separate internal width macros. */ #define INTPTR_WIDTH __INTPTR_WIDTH__ @@ -813,7 +931,7 @@ typedef __UINTMAX_TYPE__ uintmax_t; /* C2x 7.20.2.5 Width of greatest-width integer types. */ /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L /* NB: The C standard requires that these be the same value, but the compiler exposes separate internal width macros. */ #define INTMAX_WIDTH __INTMAX_WIDTH__ @@ -849,7 +967,7 @@ typedef __UINTMAX_TYPE__ uintmax_t; /* C2x 7.20.3.x Width of other integer types. */ /* FIXME: This is using the placeholder dates Clang produces for these macros in C2x mode; switch to the correct values once they've been published. */ -#if __STDC_VERSION__ >= 202000L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L #define PTRDIFF_WIDTH __PTRDIFF_WIDTH__ #define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__ #define SIZE_WIDTH __SIZE_WIDTH__ diff --git a/lib/include/stdnoreturn.h b/lib/include/stdnoreturn.h index 7d19fa7b2f..967be94762 100644 --- a/lib/include/stdnoreturn.h +++ b/lib/include/stdnoreturn.h @@ -13,7 +13,7 @@ #define noreturn _Noreturn #define __noreturn_is_defined 1 -#if __STDC_VERSION__ > 201710L && \ +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) && \ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS) /* The noreturn macro is deprecated in C2x. 
We do not mark it as such because including the header file in C2x is also deprecated and we do not want to diff --git a/lib/include/unwind.h b/lib/include/unwind.h index 971a62da0d..33e1792cd1 100644 --- a/lib/include/unwind.h +++ b/lib/include/unwind.h @@ -65,7 +65,8 @@ struct _Unwind_Context; #if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \ defined(__ARM_DWARF_EH__) || defined(__SEH__)) struct _Unwind_Control_Block; -typedef struct _Unwind_Control_Block _Unwind_Exception; /* Alias */ +typedef struct _Unwind_Control_Block _Unwind_Control_Block; +#define _Unwind_Exception _Unwind_Control_Block /* Alias */ #else struct _Unwind_Exception; typedef struct _Unwind_Exception _Unwind_Exception; diff --git a/lib/include/velintrin.h b/lib/include/velintrin.h index 69b1fba296..3f2bc00442 100644 --- a/lib/include/velintrin.h +++ b/lib/include/velintrin.h @@ -13,7 +13,7 @@ typedef double __vr __attribute__((__vector_size__(2048))); // Vector mask registers -#if __STDC_VERSION__ >= 199901L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // For C99 typedef _Bool __vm __attribute__((ext_vector_type(256))); typedef _Bool __vm256 __attribute__((ext_vector_type(256))); diff --git a/lib/include/x86gprintrin.h b/lib/include/x86gprintrin.h index 2c2fbb97c9..f9a765be43 100644 --- a/lib/include/x86gprintrin.h +++ b/lib/include/x86gprintrin.h @@ -25,23 +25,35 @@ #include #endif +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__PRFCHI__) +#include +#endif + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__RAOINT__) +#include +#endif + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__CMPCCXADD__) +#include +#endif + #if defined(__i386__) -#define __FULLBX "ebx" +#define __SAVE_GPRBX "mov {%%ebx, %%eax |eax, ebx};" +#define __RESTORE_GPRBX "mov {%%eax, %%ebx |ebx, eax};" #define __TMPGPR "eax" #else // When in 64-bit target, the 32-bit operands generate a 32-bit result, // zero-extended to a 64-bit result in the destination general-purpose, // It means "mov x %ebx" will clobber the higher 32 bits of rbx, so we // should preserve the 64-bit register rbx. -#define __FULLBX "rbx" +#define __SAVE_GPRBX "mov {%%rbx, %%rax |rax, rbx};" +#define __RESTORE_GPRBX "mov {%%rax, %%rbx |rbx, rax};" #define __TMPGPR "rax" #endif -#define __MOVEGPR(__r1, __r2) "mov {%%"__r1 ", %%"__r2 "|"__r2 ", "__r1"};" - -#define __SAVE_GPRBX __MOVEGPR(__FULLBX, __TMPGPR) -#define __RESTORE_GPRBX __MOVEGPR(__TMPGPR, __FULLBX) - #define __SSC_MARK(__Tag) \ __asm__ __volatile__( __SAVE_GPRBX \ "mov {%0, %%ebx|ebx, %0}; " \ diff --git a/lib/include/xmmintrin.h b/lib/include/xmmintrin.h index 4aa70d6e55..80aa2a817f 100644 --- a/lib/include/xmmintrin.h +++ b/lib/include/xmmintrin.h @@ -1906,7 +1906,7 @@ _mm_setr_ps(float __z, float __y, float __x, float __w) static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_setzero_ps(void) { - return __extension__ (__m128){ 0, 0, 0, 0 }; + return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f }; } /// Stores the upper 64 bits of a 128-bit vector of [4 x float] to a @@ -3005,7 +3005,6 @@ do { \ #define _m_pavgw _mm_avg_pu16 #define _m_psadbw _mm_sad_pu8 #define _m_ _mm_ -#define _m_ _mm_ #undef __DEFAULT_FN_ATTRS #undef __DEFAULT_FN_ATTRS_MMX
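
The stdarg.h hunk above makes va_start a variadic macro in C2x mode, so the second argument becomes optional there while remaining required in earlier language modes. A minimal usage sketch, assuming a hosted toolchain shipping this header; the helper sum() is invented for illustration:

#include <stdarg.h>
#include <stdio.h>

static int sum(int count, ...) {
  va_list ap;
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L
  va_start(ap);        /* C2x: the last named parameter may be omitted */
#else
  va_start(ap, count); /* pre-C2x: the last named parameter is required */
#endif
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(ap, int);
  va_end(ap);
  return total;
}

int main(void) {
  printf("%d\n", sum(3, 1, 2, 3)); /* prints 6 */
}

Similarly, the new raointintrin.h (pulled in through x86gprintrin.h when __RAOINT__ is defined) adds the AOR/AXOR atomic integer intrinsics such as _aor_i64. A hypothetical sketch, assuming an x86-64 target compiled with -mraoint; the function name set_flag is invented for illustration:

#include <x86gprintrin.h>

/* Atomically ORs the given bit into the 64-bit value at *word (the AOR
   instruction); no result is returned. Per the header comments, this is
   meant for contended or weakly ordered updates and may perform worse
   than a plain lock-prefixed OR on data hot in a single thread. */
void set_flag(long long *word) {
  _aor_i64(word, 0x1LL);
}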