zig cc: update intrinsic headers to LLVM 21

Alex Rønne Petersen 2025-07-16 04:52:10 +02:00
parent b7a8c045ef
commit ce7339e80a
45 changed files with 29569 additions and 25385 deletions

217
lib/include/__clang_spirv_builtins.h vendored Normal file

@@ -0,0 +1,217 @@
/*===---- spirv_builtin_vars.h - SPIR-V built-in ---------------------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __SPIRV_BUILTIN_VARS_H
#define __SPIRV_BUILTIN_VARS_H
#if __cplusplus >= 201103L
#define __SPIRV_NOEXCEPT noexcept
#else
#define __SPIRV_NOEXCEPT
#endif
#pragma push_macro("__size_t")
#pragma push_macro("__uint32_t")
#pragma push_macro("__uint64_t")
#define __size_t __SIZE_TYPE__
#define __uint32_t __UINT32_TYPE__
#define __SPIRV_overloadable __attribute__((overloadable))
#define __SPIRV_convergent __attribute__((convergent))
#define __SPIRV_inline __attribute__((always_inline))
#define __global __attribute__((opencl_global))
#define __local __attribute__((opencl_local))
#define __private __attribute__((opencl_private))
#define __constant __attribute__((opencl_constant))
#ifdef __SYCL_DEVICE_ONLY__
#define __generic
#else
#define __generic __attribute__((opencl_generic))
#endif
// Check if SPIR-V builtins are supported.
// As the translator doesn't use the LLVM intrinsics (which would be emitted if
// we use the SPIR-V builtins) we can't rely on the SPIRV32/SPIRV64 etc. macros
// to establish if we can use the builtin alias. We disable the builtins
// altogether if we do not intend to use the backend. So instead of using
// target macros, rely on a __has_builtin test.
#if (__has_builtin(__builtin_spirv_num_workgroups))
#define __SPIRV_BUILTIN_ALIAS(builtin) \
__attribute__((clang_builtin_alias(builtin)))
#else
#define __SPIRV_BUILTIN_ALIAS(builtin)
#endif
// Builtin IDs and sizes
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_num_workgroups) __size_t
__spirv_NumWorkgroups(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_workgroup_size) __size_t
__spirv_WorkgroupSize(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_workgroup_id) __size_t
__spirv_WorkgroupId(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_local_invocation_id) __size_t
__spirv_LocalInvocationId(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_global_invocation_id) __size_t
__spirv_GlobalInvocationId(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_global_size) __size_t
__spirv_GlobalSize(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_global_offset) __size_t
__spirv_GlobalOffset(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_subgroup_size) __uint32_t
__spirv_SubgroupSize();
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_subgroup_max_size) __uint32_t
__spirv_SubgroupMaxSize();
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_num_subgroups) __uint32_t
__spirv_NumSubgroups();
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_subgroup_id) __uint32_t
__spirv_SubgroupId();
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_subgroup_local_invocation_id)
__uint32_t __spirv_SubgroupLocalInvocationId();
// OpGenericCastToPtrExplicit
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__global void *__spirv_GenericCastToPtrExplicit_ToGlobal(__generic void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__global const void *
__spirv_GenericCastToPtrExplicit_ToGlobal(__generic const void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__global volatile void *
__spirv_GenericCastToPtrExplicit_ToGlobal(__generic volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__global const volatile void *
__spirv_GenericCastToPtrExplicit_ToGlobal(__generic const volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__local void *__spirv_GenericCastToPtrExplicit_ToLocal(__generic void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__local const void *
__spirv_GenericCastToPtrExplicit_ToLocal(__generic const void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__local volatile void *
__spirv_GenericCastToPtrExplicit_ToLocal(__generic volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__local const volatile void *
__spirv_GenericCastToPtrExplicit_ToLocal(__generic const volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__private void *
__spirv_GenericCastToPtrExplicit_ToPrivate(__generic void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__private const void *
__spirv_GenericCastToPtrExplicit_ToPrivate(__generic const void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__private volatile void *
__spirv_GenericCastToPtrExplicit_ToPrivate(__generic volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__private const volatile void *
__spirv_GenericCastToPtrExplicit_ToPrivate(__generic const volatile void *,
int) __SPIRV_NOEXCEPT;
// OpGenericCastToPtr
static __SPIRV_overloadable __SPIRV_inline __global void *
__spirv_GenericCastToPtr_ToGlobal(__generic void *p, int) __SPIRV_NOEXCEPT {
return (__global void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __global const void *
__spirv_GenericCastToPtr_ToGlobal(__generic const void *p,
int) __SPIRV_NOEXCEPT {
return (__global const void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __global volatile void *
__spirv_GenericCastToPtr_ToGlobal(__generic volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__global volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __global const volatile void *
__spirv_GenericCastToPtr_ToGlobal(__generic const volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__global const volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __local void *
__spirv_GenericCastToPtr_ToLocal(__generic void *p, int) __SPIRV_NOEXCEPT {
return (__local void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __local const void *
__spirv_GenericCastToPtr_ToLocal(__generic const void *p,
int) __SPIRV_NOEXCEPT {
return (__local const void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __local volatile void *
__spirv_GenericCastToPtr_ToLocal(__generic volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__local volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __local const volatile void *
__spirv_GenericCastToPtr_ToLocal(__generic const volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__local const volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __private void *
__spirv_GenericCastToPtr_ToPrivate(__generic void *p, int) __SPIRV_NOEXCEPT {
return (__private void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __private const void *
__spirv_GenericCastToPtr_ToPrivate(__generic const void *p,
int) __SPIRV_NOEXCEPT {
return (__private const void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __private volatile void *
__spirv_GenericCastToPtr_ToPrivate(__generic volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__private volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __private const volatile void *
__spirv_GenericCastToPtr_ToPrivate(__generic const volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__private const volatile void *)p;
}
#pragma pop_macro("__size_t")
#pragma pop_macro("__uint32_t")
#pragma pop_macro("__uint64_t")
#undef __SPIRV_overloadable
#undef __SPIRV_convergent
#undef __SPIRV_inline
#undef __global
#undef __local
#undef __constant
#undef __generic
#undef __SPIRV_BUILTIN_ALIAS
#undef __SPIRV_NOEXCEPT
#endif /* __SPIRV_BUILTIN_VARS_H */
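A rough usage sketch for the new header (not part of the commit): on a SPIR-V target where the toolchain makes these declarations visible, an OpenCL C kernel could use them as below. The dimension index 0 and the storage-class operand 5 (SPIR-V CrossWorkgroup) are illustrative values, and the kernel name is made up.

/* Illustrative OpenCL C sketch, assuming a SPIR-V device target. */
kernel void scale(global float *buf) {
  size_t gid = __spirv_GlobalInvocationId(0); /* flat x-dimension index */
  generic float *p = buf;                     /* decay to the generic space */
  /* Recover the global-address-space view; 5 = CrossWorkgroup. */
  global float *g = __spirv_GenericCastToPtrExplicit_ToGlobal(p, 5);
  g[gid] *= 2.0f;
}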


@@ -10,8 +10,8 @@
#ifndef va_arg
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
/* C23 does not require the second parameter for va_start. */
#define va_start(ap, ...) __builtin_va_start(ap, 0)
/* C23 uses a special builtin. */
#define va_start(...) __builtin_c23_va_start(__VA_ARGS__)
#else
/* Versions before C23 do require the second parameter. */
#define va_start(ap, param) __builtin_va_start(ap, param)
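For reference, a minimal C23 sketch of what the new definition enables: va_start may now be called with the va_list alone.

#include <stdarg.h>

/* Minimal C23 sketch (compile with -std=c23): the last fixed parameter
   no longer has to be named in va_start. */
int sum(int count, ...) {
  va_list ap;
  va_start(ap); /* C23 form: no second argument */
  int total = 0;
  for (int i = 0; i < count; i++)
    total += va_arg(ap, int);
  va_end(ap);
  return total;
}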

113
lib/include/altivec.h vendored

@@ -17525,70 +17525,73 @@ vec_bperm(vector unsigned long long __a, vector unsigned char __b) {
/* vec_reve */
static inline __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
static __inline__ __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector signed char vec_reve(vector signed char __a) {
static __inline__ __ATTRS_o_ai vector signed char
vec_reve(vector signed char __a) {
return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector unsigned char
static __inline__ __ATTRS_o_ai vector unsigned char
vec_reve(vector unsigned char __a) {
return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
static __inline__ __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector signed int vec_reve(vector signed int __a) {
static __inline__ __ATTRS_o_ai vector signed int
vec_reve(vector signed int __a) {
return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector unsigned int
static __inline__ __ATTRS_o_ai vector unsigned int
vec_reve(vector unsigned int __a) {
return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector bool short vec_reve(vector bool short __a) {
static __inline__ __ATTRS_o_ai vector bool short
vec_reve(vector bool short __a) {
return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector signed short
static __inline__ __ATTRS_o_ai vector signed short
vec_reve(vector signed short __a) {
return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector unsigned short
static __inline__ __ATTRS_o_ai vector unsigned short
vec_reve(vector unsigned short __a) {
return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector float vec_reve(vector float __a) {
static __inline__ __ATTRS_o_ai vector float vec_reve(vector float __a) {
return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
}
#ifdef __VSX__
static inline __ATTRS_o_ai vector bool long long
static __inline__ __ATTRS_o_ai vector bool long long
vec_reve(vector bool long long __a) {
return __builtin_shufflevector(__a, __a, 1, 0);
}
static inline __ATTRS_o_ai vector signed long long
static __inline__ __ATTRS_o_ai vector signed long long
vec_reve(vector signed long long __a) {
return __builtin_shufflevector(__a, __a, 1, 0);
}
static inline __ATTRS_o_ai vector unsigned long long
static __inline__ __ATTRS_o_ai vector unsigned long long
vec_reve(vector unsigned long long __a) {
return __builtin_shufflevector(__a, __a, 1, 0);
}
static inline __ATTRS_o_ai vector double vec_reve(vector double __a) {
static __inline__ __ATTRS_o_ai vector double vec_reve(vector double __a) {
return __builtin_shufflevector(__a, __a, 1, 0);
}
#endif
@@ -17721,41 +17724,41 @@ typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
typedef vector float unaligned_vec_float __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed char vec_xl(ptrdiff_t __offset,
const signed char *__ptr) {
static __inline__ __ATTRS_o_ai vector signed char
vec_xl(ptrdiff_t __offset, const signed char *__ptr) {
return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
static __inline__ __ATTRS_o_ai vector unsigned char
vec_xl(ptrdiff_t __offset, const unsigned char *__ptr) {
return *(unaligned_vec_uchar*)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed short
static __inline__ __ATTRS_o_ai vector signed short
vec_xl(ptrdiff_t __offset, const signed short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sshort *)__addr;
}
static inline __ATTRS_o_ai vector unsigned short
static __inline__ __ATTRS_o_ai vector unsigned short
vec_xl(ptrdiff_t __offset, const unsigned short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ushort *)__addr;
}
static inline __ATTRS_o_ai vector signed int vec_xl(ptrdiff_t __offset,
const signed int *__ptr) {
static __inline__ __ATTRS_o_ai vector signed int
vec_xl(ptrdiff_t __offset, const signed int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sint *)__addr;
}
static inline __ATTRS_o_ai vector unsigned int
static __inline__ __ATTRS_o_ai vector unsigned int
vec_xl(ptrdiff_t __offset, const unsigned int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_uint *)__addr;
}
static inline __ATTRS_o_ai vector float vec_xl(ptrdiff_t __offset,
static __inline__ __ATTRS_o_ai vector float vec_xl(ptrdiff_t __offset,
const float *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_float *)__addr;
@@ -17766,19 +17769,19 @@ typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
typedef vector double unaligned_vec_double __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed long long
static __inline__ __ATTRS_o_ai vector signed long long
vec_xl(ptrdiff_t __offset, const signed long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sll *)__addr;
}
static inline __ATTRS_o_ai vector unsigned long long
static __inline__ __ATTRS_o_ai vector unsigned long long
vec_xl(ptrdiff_t __offset, const unsigned long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ull *)__addr;
}
static inline __ATTRS_o_ai vector double vec_xl(ptrdiff_t __offset,
static __inline__ __ATTRS_o_ai vector double vec_xl(ptrdiff_t __offset,
const double *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_double *)__addr;
@@ -17790,13 +17793,13 @@ static inline __ATTRS_o_ai vector double vec_xl(ptrdiff_t __offset,
typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
typedef vector unsigned __int128 unaligned_vec_ui128
__attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
static __inline__ __ATTRS_o_ai vector signed __int128
vec_xl(ptrdiff_t __offset, const signed __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_si128 *)__addr;
}
static inline __ATTRS_o_ai vector unsigned __int128
static __inline__ __ATTRS_o_ai vector unsigned __int128
vec_xl(ptrdiff_t __offset, const unsigned __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ui128 *)__addr;
@@ -17991,64 +17994,64 @@ vec_load_splats(unsigned long long __offset, const float *__ptr) {
#define vec_xstd2 vec_xst
#define vec_xstw4 vec_xst
static inline __ATTRS_o_ai void
static __inline__ __ATTRS_o_ai void
vec_xst(vector signed char __vec, ptrdiff_t __offset, signed char *__ptr) {
*(unaligned_vec_schar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
static __inline__ __ATTRS_o_ai void
vec_xst(vector unsigned char __vec, ptrdiff_t __offset, unsigned char *__ptr) {
*(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
static __inline__ __ATTRS_o_ai void
vec_xst(vector signed short __vec, ptrdiff_t __offset, signed short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_sshort *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
static __inline__ __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
ptrdiff_t __offset,
unsigned short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_ushort *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
ptrdiff_t __offset, signed int *__ptr) {
static __inline__ __ATTRS_o_ai void
vec_xst(vector signed int __vec, ptrdiff_t __offset, signed int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_sint *)__addr = __vec;
}
static inline __ATTRS_o_ai void
static __inline__ __ATTRS_o_ai void
vec_xst(vector unsigned int __vec, ptrdiff_t __offset, unsigned int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_uint *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector float __vec, ptrdiff_t __offset,
float *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst(vector float __vec,
ptrdiff_t __offset, float *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_float *)__addr = __vec;
}
#ifdef __VSX__
static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
static __inline__ __ATTRS_o_ai void vec_xst(vector signed long long __vec,
ptrdiff_t __offset,
signed long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_sll *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
static __inline__ __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
ptrdiff_t __offset,
unsigned long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_ull *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector double __vec, ptrdiff_t __offset,
double *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst(vector double __vec,
ptrdiff_t __offset, double *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_double *)__addr = __vec;
}
@@ -18056,14 +18059,14 @@ static inline __ATTRS_o_ai void vec_xst(vector double __vec, ptrdiff_t __offset,
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
defined(__SIZEOF_INT128__)
static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
static __inline__ __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
ptrdiff_t __offset,
signed __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_si128 *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
static __inline__ __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
ptrdiff_t __offset,
unsigned __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
@@ -18075,50 +18078,50 @@ static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
#if defined(__POWER10_VECTOR__) && defined(__VSX__) && \
defined(__SIZEOF_INT128__)
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
static __inline__ __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed char *__ptr) {
*(__ptr + __offset) = (signed char)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
ptrdiff_t __offset,
static __inline__ __ATTRS_o_ai void
vec_xst_trunc(vector unsigned __int128 __vec, ptrdiff_t __offset,
unsigned char *__ptr) {
*(__ptr + __offset) = (unsigned char)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
static __inline__ __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed short *__ptr) {
*(__ptr + __offset) = (signed short)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
ptrdiff_t __offset,
static __inline__ __ATTRS_o_ai void
vec_xst_trunc(vector unsigned __int128 __vec, ptrdiff_t __offset,
unsigned short *__ptr) {
*(__ptr + __offset) = (unsigned short)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
static __inline__ __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed int *__ptr) {
*(__ptr + __offset) = (signed int)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
ptrdiff_t __offset,
static __inline__ __ATTRS_o_ai void
vec_xst_trunc(vector unsigned __int128 __vec, ptrdiff_t __offset,
unsigned int *__ptr) {
*(__ptr + __offset) = (unsigned int)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
static __inline__ __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed long long *__ptr) {
*(__ptr + __offset) = (signed long long)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
ptrdiff_t __offset,
static __inline__ __ATTRS_o_ai void
vec_xst_trunc(vector unsigned __int128 __vec, ptrdiff_t __offset,
unsigned long long *__ptr) {
*(__ptr + __offset) = (unsigned long long)__vec[0];
}
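A quick illustrative call site for the vec_xl/vec_xst family touched above (assumes a PowerPC target with AltiVec/VSX enabled; the function name is made up).

#include <altivec.h>

/* Illustrative sketch: vec_xl/vec_xst perform unaligned 16-byte vector
   loads and stores at a byte offset from the base pointer. */
void copy_vec(const float *src, float *dst) {
  vector float v = vec_xl(0, src);
  vec_xst(v, 0, dst);
}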


@@ -228,7 +228,7 @@
/// dst.byte[i] := a.row[row_index].byte[row_chunk+i]
/// ENDFOR
/// \endcode
#define _tile_movrow(a, b) __builtin_ia32_tilemovrow(a, b)
#define _tile_movrow(a, b) ((__m512i)__builtin_ia32_tilemovrow(a, b))
/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
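With the added cast, the macro yields a usable __m512i value directly; an illustrative sketch (tile number 0 and row 3 are placeholder values, and the relevant AMX target features must be enabled).

#include <immintrin.h>

/* Illustrative sketch: read row 3 of tile register 0 as a 512-bit vector.
   The cast added above is what lets the result be consumed as __m512i. */
__m512i grab_row(void) {
  return _tile_movrow(0, 3);
}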


@@ -135,9 +135,8 @@ _tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_COMPLEX
static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,
__tile1024i src1) {
static __inline__ void __DEFAULT_FN_ATTRS_COMPLEX
__tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0, __tile1024i src1) {
dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col,
dst->tile, src0.tile, src1.tile);
}
@@ -158,9 +157,8 @@ static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_COMPLEX
static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0,
__tile1024i src1) {
static __inline__ void __DEFAULT_FN_ATTRS_COMPLEX
__tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0, __tile1024i src1) {
dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col,
dst->tile, src0.tile, src1.tile);
}
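An illustrative call site for the two wrappers above (the helper name and its parameters are made up).

/* Illustrative sketch: accumulate the real and imaginary products of two
   FP16 complex tiles into separate accumulator tiles. */
void cmul(__tile1024i *re, __tile1024i *im, __tile1024i a, __tile1024i b) {
  __tile_cmmrlfp16ps(re, a, b); /* real parts */
  __tile_cmmimfp16ps(im, a, b); /* imaginary parts */
}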


@@ -8,7 +8,7 @@
*/
#ifndef __IMMINTRIN_H
#error \
"Never use <amxtf32tranposeintrin.h> directly; include <immintrin.h> instead."
"Never use <amxtf32transposeintrin.h> directly; include <immintrin.h> instead."
#endif // __IMMINTRIN_H
#ifndef __AMX_TF32TRANSPOSEINTRIN_H

16
lib/include/andes_vector.h vendored Normal file

@@ -0,0 +1,16 @@
//===----- andes_vector.h - Andes Vector definitions ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _ANDES_VECTOR_H_
#define _ANDES_VECTOR_H_
#include "riscv_vector.h"
#pragma clang riscv intrinsic andes_vector
#endif //_ANDES_VECTOR_H_
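Including the new header pulls in riscv_vector.h first, so the standard RVV intrinsics remain available alongside the Andes set; a minimal sketch using only standard calls (Andes-specific intrinsic names are toolchain-dependent, so none are shown, and the function name is made up).

#include <stddef.h>
#include <stdint.h>
#include <andes_vector.h>

/* Minimal sketch: strip-mined in-place vector add using the standard RVV
   intrinsics that andes_vector.h re-exports via riscv_vector.h. */
void vadd_i32(int32_t *a, const int32_t *b, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e32m1(n); /* elements handled this pass */
    vint32m1_t va = __riscv_vle32_v_i32m1(a, vl);
    vint32m1_t vb = __riscv_vle32_v_i32m1(b, vl);
    __riscv_vse32_v_i32m1(a, __riscv_vadd_vv_i32m1(va, vb, vl), vl);
    a += vl;
    b += vl;
    n -= vl;
  }
}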


@@ -29,47 +29,16 @@ extern "C" {
/* 7 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 7.3 Memory barriers */
#if !__has_builtin(__dmb)
#define __dmb(i) __builtin_arm_dmb(i)
#endif
#if !__has_builtin(__dsb)
#define __dsb(i) __builtin_arm_dsb(i)
#endif
#if !__has_builtin(__isb)
#define __isb(i) __builtin_arm_isb(i)
#endif
void __dmb(unsigned int);
void __dsb(unsigned int);
void __isb(unsigned int);
/* 7.4 Hints */
#if !__has_builtin(__wfi)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
__builtin_arm_wfi();
}
#endif
#if !__has_builtin(__wfe)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
__builtin_arm_wfe();
}
#endif
#if !__has_builtin(__sev)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
__builtin_arm_sev();
}
#endif
#if !__has_builtin(__sevl)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
__builtin_arm_sevl();
}
#endif
#if !__has_builtin(__yield)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
__builtin_arm_yield();
}
#endif
void __wfi(void);
void __wfe(void);
void __sev(void);
void __sevl(void);
void __yield(void);
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
@@ -872,8 +841,9 @@ __gcspopm() {
return __builtin_arm_gcspopm(0);
}
static __inline__ const void * __attribute__((__always_inline__, __nodebug__, target("gcs")))
__gcsss(const void *__stack) {
static __inline__ void *__attribute__((__always_inline__, __nodebug__,
target("gcs")))
__gcsss(void *__stack) {
return __builtin_arm_gcsss(__stack);
}
#endif
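The barrier and hint entry points changed above keep their ACLE signatures; an illustrative sketch, where 0xF selects the SY (full-system) domain and the function names are made up.

#include <arm_acle.h>

volatile int ready;

/* Illustrative ACLE sketch: order the flag store with a full-system data
   synchronization barrier, and idle with a wait-for-event hint. */
void publish(void) {
  ready = 1;
  __dsb(0xF);
}

void idle(void) {
  __wfe();
}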

178
lib/include/arm_fp16.h vendored

@@ -34,408 +34,408 @@ typedef __fp16 float16_t;
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vabdh_f16(__s0, __s1)); \
__ret; \
})
#define vabsh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vabsh_f16(__s0)); \
__ret; \
})
#define vaddh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vaddh_f16(__s0, __s1)); \
__ret; \
})
#define vcageh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcageh_f16(__s0, __s1)); \
__ret; \
})
#define vcagth_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcagth_f16(__s0, __s1)); \
__ret; \
})
#define vcaleh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcaleh_f16(__s0, __s1)); \
__ret; \
})
#define vcalth_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcalth_f16(__s0, __s1)); \
__ret; \
})
#define vceqh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vceqh_f16(__s0, __s1)); \
__ret; \
})
#define vceqzh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vceqzh_f16(__s0)); \
__ret; \
})
#define vcgeh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcgeh_f16(__s0, __s1)); \
__ret; \
})
#define vcgezh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcgezh_f16(__s0)); \
__ret; \
})
#define vcgth_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcgth_f16(__s0, __s1)); \
__ret; \
})
#define vcgtzh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcgtzh_f16(__s0)); \
__ret; \
})
#define vcleh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcleh_f16(__s0, __s1)); \
__ret; \
})
#define vclezh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vclezh_f16(__s0)); \
__ret; \
})
#define vclth_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vclth_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vclth_f16(__s0, __s1)); \
__ret; \
})
#define vcltzh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcltzh_f16(__s0)); \
__ret; \
})
#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvth_n_s16_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvth_n_s32_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvth_n_s64_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvth_n_u16_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvth_n_u32_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvth_n_u64_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvth_s16_f16(__s0)); \
__ret; \
})
#define vcvth_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvth_s32_f16(__s0)); \
__ret; \
})
#define vcvth_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvth_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvth_s64_f16(__s0)); \
__ret; \
})
#define vcvth_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvth_u16_f16(__s0)); \
__ret; \
})
#define vcvth_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvth_u32_f16(__s0)); \
__ret; \
})
#define vcvth_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvth_u64_f16(__s0)); \
__ret; \
})
#define vcvtah_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvtah_s16_f16(__s0)); \
__ret; \
})
#define vcvtah_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtah_s32_f16(__s0)); \
__ret; \
})
#define vcvtah_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtah_s64_f16(__s0)); \
__ret; \
})
#define vcvtah_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvtah_u16_f16(__s0)); \
__ret; \
})
#define vcvtah_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtah_u32_f16(__s0)); \
__ret; \
})
#define vcvtah_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtah_u64_f16(__s0)); \
__ret; \
})
#define vcvth_f16_u16(__p0) __extension__ ({ \
float16_t __ret; \
uint16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_u16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_u16(__s0)); \
__ret; \
})
#define vcvth_f16_s16(__p0) __extension__ ({ \
float16_t __ret; \
int16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_s16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_s16(__s0)); \
__ret; \
})
#define vcvth_f16_u32(__p0) __extension__ ({ \
float16_t __ret; \
uint32_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_u32(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_u32(__s0)); \
__ret; \
})
#define vcvth_f16_s32(__p0) __extension__ ({ \
float16_t __ret; \
int32_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_s32(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_s32(__s0)); \
__ret; \
})
#define vcvth_f16_u64(__p0) __extension__ ({ \
float16_t __ret; \
uint64_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_u64(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_u64(__s0)); \
__ret; \
})
#define vcvth_f16_s64(__p0) __extension__ ({ \
float16_t __ret; \
int64_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_s64(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_s64(__s0)); \
__ret; \
})
#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
float16_t __ret; \
uint32_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_u32(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
float16_t __ret; \
int32_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_s32(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
float16_t __ret; \
uint64_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_u64(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
float16_t __ret; \
int64_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_s64(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
uint16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_u16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
int16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_s16(__s0, __p1)); \
__ret; \
})
#define vcvtmh_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvtmh_s16_f16(__s0)); \
__ret; \
})
#define vcvtmh_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtmh_s32_f16(__s0)); \
__ret; \
})
#define vcvtmh_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtmh_s64_f16(__s0)); \
__ret; \
})
#define vcvtmh_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvtmh_u16_f16(__s0)); \
__ret; \
})
#define vcvtmh_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtmh_u32_f16(__s0)); \
__ret; \
})
#define vcvtmh_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtmh_u64_f16(__s0)); \
__ret; \
})
#define vcvtnh_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvtnh_s16_f16(__s0)); \
__ret; \
})
#define vcvtnh_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtnh_s32_f16(__s0)); \
__ret; \
})
#define vcvtnh_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtnh_s64_f16(__s0)); \
__ret; \
})
#define vcvtnh_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvtnh_u16_f16(__s0)); \
__ret; \
})
#define vcvtnh_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtnh_u32_f16(__s0)); \
__ret; \
})
#define vcvtnh_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtnh_u64_f16(__s0)); \
__ret; \
})
#define vcvtph_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvtph_s16_f16(__s0)); \
__ret; \
})
#define vcvtph_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtph_s32_f16(__s0)); \
__ret; \
})
#define vcvtph_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtph_s64_f16(__s0)); \
__ret; \
})
#define vcvtph_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvtph_u16_f16(__s0)); \
__ret; \
})
#define vcvtph_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtph_u32_f16(__s0)); \
__ret; \
})
#define vcvtph_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtph_u64_f16(__s0)); \
__ret; \
})
#define vdivh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vdivh_f16(__s0, __s1)); \
__ret; \
})
#define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -443,7 +443,7 @@ typedef __fp16 float16_t;
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
float16_t __s2 = __p2; \
__ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmah_f16(__s0, __s1, __s2)); \
__ret; \
})
#define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -451,142 +451,142 @@ typedef __fp16 float16_t;
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
float16_t __s2 = __p2; \
__ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmsh_f16(__s0, __s1, __s2)); \
__ret; \
})
#define vmaxh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxh_f16(__s0, __s1)); \
__ret; \
})
#define vmaxnmh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxnmh_f16(__s0, __s1)); \
__ret; \
})
#define vminh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminh_f16(__s0, __s1)); \
__ret; \
})
#define vminnmh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminnmh_f16(__s0, __s1)); \
__ret; \
})
#define vmulh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmulh_f16(__s0, __s1)); \
__ret; \
})
#define vmulxh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vmulxh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmulxh_f16(__s0, __s1)); \
__ret; \
})
#define vnegh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vnegh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vnegh_f16(__s0)); \
__ret; \
})
#define vrecpeh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrecpeh_f16(__s0)); \
__ret; \
})
#define vrecpsh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrecpsh_f16(__s0, __s1)); \
__ret; \
})
#define vrecpxh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrecpxh_f16(__s0)); \
__ret; \
})
#define vrndh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndh_f16(__s0)); \
__ret; \
})
#define vrndah_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndah_f16(__s0)); \
__ret; \
})
#define vrndih_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndih_f16(__s0)); \
__ret; \
})
#define vrndmh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndmh_f16(__s0)); \
__ret; \
})
#define vrndnh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndnh_f16(__s0)); \
__ret; \
})
#define vrndph_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndph_f16(__s0)); \
__ret; \
})
#define vrndxh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndxh_f16(__s0)); \
__ret; \
})
#define vrsqrteh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrsqrteh_f16(__s0)); \
__ret; \
})
#define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrsqrtsh_f16(__s0, __s1)); \
__ret; \
})
#define vsqrth_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vsqrth_f16(__s0)); \
__ret; \
})
#define vsubh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vsubh_f16(__s0, __s1)); \
__ret; \
})
#endif
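The __builtin_bit_cast rewrite does not change how these intrinsics are called; an illustrative AArch64 sketch (requires the +fp16 scalar arithmetic extension; the helper name is made up).

#include <arm_fp16.h>

/* Illustrative sketch: scalar half-precision sqrt(a*a + b*b) using the
   intrinsics rewritten above. vfmah_f16(x, y, z) computes x + y*z. */
float16_t hypot_h(float16_t a, float16_t b) {
  return vsqrth_f16(vfmah_f16(vmulh_f16(b, b), a, a));
}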

42776
lib/include/arm_neon.h vendored

File diff suppressed because it is too large

694
lib/include/arm_sme.h vendored

@@ -146,6 +146,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s32
svint32_t svread_hor_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m)))
svint64_t svread_hor_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_mf8_m)))
svmfloat8_t svread_hor_za128_mf8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m)))
svint16_t svread_hor_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m)))
@@ -172,6 +174,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m)
svuint8_t svread_hor_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m)))
svint8_t svread_hor_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_mf8_m)))
svmfloat8_t svread_hor_za8_mf8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m)))
svuint8_t svread_ver_za128_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m)))
@@ -194,6 +198,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s32
svint32_t svread_ver_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m)))
svint64_t svread_ver_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_mf8_m)))
svmfloat8_t svread_ver_za128_mf8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m)))
svint16_t svread_ver_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m)))
@@ -220,6 +226,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m)
svuint8_t svread_ver_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m)))
svint8_t svread_ver_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_mf8_m)))
svmfloat8_t svread_ver_za8_mf8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za128)))
void svst1_hor_vnum_za128(uint64_t, uint32_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za16)))
@@ -294,6 +302,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s3
void svwrite_hor_za128_s32_m(uint64_t, uint32_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m)))
void svwrite_hor_za128_s64_m(uint64_t, uint32_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_mf8_m)))
void svwrite_hor_za128_mf8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m)))
void svwrite_hor_za128_s16_m(uint64_t, uint32_t, svbool_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m)))
@@ -320,6 +330,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_m
void svwrite_hor_za8_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m)))
void svwrite_hor_za8_s8_m(uint64_t, uint32_t, svbool_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_m)))
void svwrite_hor_za8_mf8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m)))
void svwrite_ver_za128_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m)))
@@ -342,6 +354,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s3
void svwrite_ver_za128_s32_m(uint64_t, uint32_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m)))
void svwrite_ver_za128_s64_m(uint64_t, uint32_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_mf8_m)))
void svwrite_ver_za128_mf8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m)))
void svwrite_ver_za128_s16_m(uint64_t, uint32_t, svbool_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m)))
@@ -368,6 +382,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_m
void svwrite_ver_za8_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m)))
void svwrite_ver_za8_s8_m(uint64_t, uint32_t, svbool_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_m)))
void svwrite_ver_za8_mf8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_mask_za)))
void svzero_mask_za(uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za)))
@@ -422,6 +438,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s3
svint32_t svread_hor_za128_m(svint32_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m)))
svint64_t svread_hor_za128_m(svint64_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_mf8_m)))
svmfloat8_t svread_hor_za128_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m)))
svint16_t svread_hor_za128_m(svint16_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m)))
@@ -448,6 +466,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m
svuint8_t svread_hor_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m)))
svint8_t svread_hor_za8_m(svint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_mf8_m)))
svmfloat8_t svread_hor_za8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m)))
svuint8_t svread_ver_za128_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m)))
@@ -470,6 +490,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s3
svint32_t svread_ver_za128_m(svint32_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m)))
svint64_t svread_ver_za128_m(svint64_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_mf8_m)))
svmfloat8_t svread_ver_za128_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m)))
svint16_t svread_ver_za128_m(svint16_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m)))
@@ -496,6 +518,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m
svuint8_t svread_ver_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m)))
svint8_t svread_ver_za8_m(svint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_mf8_m)))
svmfloat8_t svread_ver_za8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za32_s8_m)))
void svsumopa_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za32_s8_m)))
@@ -526,6 +550,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s
void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m)))
void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_mf8_m)))
void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m)))
void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m)))
@@ -552,6 +578,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_
void svwrite_hor_za8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m)))
void svwrite_hor_za8_m(uint64_t, uint32_t, svbool_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_m)))
void svwrite_hor_za8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m)))
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m)))
@@ -574,6 +602,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m)))
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_mf8_m)))
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m)))
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m)))
@@ -600,6 +630,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_
void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m)))
void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_m)))
void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x2)))
void svadd_za16_f16_vg1x2(uint32_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x4)))
@@ -1158,6 +1190,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f16)
svfloat16_t svluti2_lane_zt_f16(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s32)))
svint32_t svluti2_lane_zt_s32(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_mf8)))
svmfloat8_t svluti2_lane_zt_mf8(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s16)))
svint16_t svluti2_lane_zt_s16(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u8_x2)))
@@ -1176,6 +1210,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f16_
svfloat16x2_t svluti2_lane_zt_f16_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s32_x2)))
svint32x2_t svluti2_lane_zt_s32_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_mf8_x2)))
svmfloat8x2_t svluti2_lane_zt_mf8_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s16_x2)))
svint16x2_t svluti2_lane_zt_s16_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u8_x4)))
@@ -1194,6 +1230,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f16_
svfloat16x4_t svluti2_lane_zt_f16_x4(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s32_x4)))
svint32x4_t svluti2_lane_zt_s32_x4(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_mf8_x4)))
svmfloat8x4_t svluti2_lane_zt_mf8_x4(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s16_x4)))
svint16x4_t svluti2_lane_zt_s16_x4(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u8)))
@@ -1212,6 +1250,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f16)
svfloat16_t svluti4_lane_zt_f16(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s32)))
svint32_t svluti4_lane_zt_s32(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_mf8)))
svmfloat8_t svluti4_lane_zt_mf8(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s16)))
svint16_t svluti4_lane_zt_s16(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u8_x2)))
@@ -1230,6 +1270,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f16_
svfloat16x2_t svluti4_lane_zt_f16_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s32_x2)))
svint32x2_t svluti4_lane_zt_s32_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_mf8_x2)))
svmfloat8x2_t svluti4_lane_zt_mf8_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s16_x2)))
svint16x2_t svluti4_lane_zt_s16_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u32_x4)))
@@ -1514,10 +1556,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_vg
svuint8x2_t svread_hor_za8_u8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_vg2)))
svint8x2_t svread_hor_za8_s8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_mf8_vg2)))
svmfloat8x2_t svread_hor_za8_mf8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_vg4)))
svuint8x4_t svread_hor_za8_u8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_vg4)))
svint8x4_t svread_hor_za8_s8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_mf8_vg4)))
svmfloat8x4_t svread_hor_za8_mf8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_vg2)))
svuint16x2_t svread_ver_za16_u16_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_vg2)))
@@ -1562,10 +1608,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_vg
svuint8x2_t svread_ver_za8_u8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_vg2)))
svint8x2_t svread_ver_za8_s8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_mf8_vg2)))
svmfloat8x2_t svread_ver_za8_mf8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_vg4)))
svuint8x4_t svread_ver_za8_u8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_vg4)))
svint8x4_t svread_ver_za8_s8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_mf8_vg4)))
svmfloat8x4_t svread_ver_za8_mf8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_u16_vg1x2)))
svuint16x2_t svread_za16_u16_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_bf16_vg1x2)))
@@ -1610,10 +1660,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_u8_vg1x2)
svuint8x2_t svread_za8_u8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_s8_vg1x2)))
svint8x2_t svread_za8_s8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_mf8_vg1x2)))
svmfloat8x2_t svread_za8_mf8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_u8_vg1x4)))
svuint8x4_t svread_za8_u8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_s8_vg1x4)))
svint8x4_t svread_za8_s8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_mf8_vg1x4)))
svmfloat8x4_t svread_za8_mf8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svstr_zt)))
void svstr_zt(uint64_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_u32_vg1x2)))
@@ -1760,10 +1814,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_v
void svwrite_hor_za8_u8_vg2(uint64_t, uint32_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg2)))
void svwrite_hor_za8_s8_vg2(uint64_t, uint32_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_vg2)))
void svwrite_hor_za8_mf8_vg2(uint64_t, uint32_t, svmfloat8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_vg4)))
void svwrite_hor_za8_u8_vg4(uint64_t, uint32_t, svuint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg4)))
void svwrite_hor_za8_s8_vg4(uint64_t, uint32_t, svint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_vg4)))
void svwrite_hor_za8_mf8_vg4(uint64_t, uint32_t, svmfloat8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_vg2)))
void svwrite_ver_za16_u16_vg2(uint64_t, uint32_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_vg2)))
@@ -1808,10 +1866,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_v
void svwrite_ver_za8_u8_vg2(uint64_t, uint32_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg2)))
void svwrite_ver_za8_s8_vg2(uint64_t, uint32_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_vg2)))
void svwrite_ver_za8_mf8_vg2(uint64_t, uint32_t, svmfloat8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_vg4)))
void svwrite_ver_za8_u8_vg4(uint64_t, uint32_t, svuint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg4)))
void svwrite_ver_za8_s8_vg4(uint64_t, uint32_t, svint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_vg4)))
void svwrite_ver_za8_mf8_vg4(uint64_t, uint32_t, svmfloat8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_u16_vg1x2)))
void svwrite_za16_u16_vg1x2(uint32_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_bf16_vg1x2)))
@@ -1856,10 +1918,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x2
void svwrite_za8_u8_vg1x2(uint32_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x2)))
void svwrite_za8_s8_vg1x2(uint32_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_mf8_vg1x2)))
void svwrite_za8_mf8_vg1x2(uint32_t, svmfloat8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x4)))
void svwrite_za8_u8_vg1x4(uint32_t, svuint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x4)))
void svwrite_za8_s8_vg1x4(uint32_t, svint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_mf8_vg1x4)))
void svwrite_za8_mf8_vg1x4(uint32_t, svmfloat8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_zt)))
void svzero_zt(uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_u32_vg1x2)))
@@ -2338,10 +2404,14 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_
void svwrite_hor_za8_vg2(uint64_t, uint32_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg2)))
void svwrite_hor_za8_vg2(uint64_t, uint32_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_vg2)))
void svwrite_hor_za8_vg2(uint64_t, uint32_t, svmfloat8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_vg4)))
void svwrite_hor_za8_vg4(uint64_t, uint32_t, svuint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg4)))
void svwrite_hor_za8_vg4(uint64_t, uint32_t, svint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_vg4)))
void svwrite_hor_za8_vg4(uint64_t, uint32_t, svmfloat8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_vg2)))
void svwrite_ver_za16_vg2(uint64_t, uint32_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_vg2)))
@@ -2386,10 +2456,14 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_
void svwrite_ver_za8_vg2(uint64_t, uint32_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg2)))
void svwrite_ver_za8_vg2(uint64_t, uint32_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_vg2)))
void svwrite_ver_za8_vg2(uint64_t, uint32_t, svmfloat8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_vg4)))
void svwrite_ver_za8_vg4(uint64_t, uint32_t, svuint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg4)))
void svwrite_ver_za8_vg4(uint64_t, uint32_t, svint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_vg4)))
void svwrite_ver_za8_vg4(uint64_t, uint32_t, svmfloat8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_u16_vg1x2)))
void svwrite_za16_vg1x2(uint32_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_bf16_vg1x2)))
@@ -2434,10 +2508,14 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x
void svwrite_za8_vg1x2(uint32_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x2)))
void svwrite_za8_vg1x2(uint32_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_mf8_vg1x2)))
void svwrite_za8_vg1x2(uint32_t, svmfloat8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x4)))
void svwrite_za8_vg1x4(uint32_t, svuint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x4)))
void svwrite_za8_vg1x4(uint32_t, svint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_mf8_vg1x4)))
void svwrite_za8_vg1x4(uint32_t, svmfloat8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x2)))
void svadd_za64_f64_vg1x2(uint32_t, svfloat64x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x4)))
@@ -2782,6 +2860,602 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_s1
void svvdot_lane_za64_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_u16_vg1x4)))
void svvdot_lane_za64_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s8_u8)))
void svmop4a_1x1_za32_s8_u8(uint64_t, svint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u8_s8)))
void svmop4a_1x1_za32_u8_s8(uint64_t, svuint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_bf16_bf16)))
void svmop4a_1x1_za32_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_f16_f16)))
void svmop4a_1x1_za32_f16_f16(uint64_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_f32_f32)))
void svmop4a_1x1_za32_f32_f32(uint64_t, svfloat32_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s8_s8)))
void svmop4a_1x1_za32_s8_s8(uint64_t, svint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s16_s16)))
void svmop4a_1x1_za32_s16_s16(uint64_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u8_u8)))
void svmop4a_1x1_za32_u8_u8(uint64_t, svuint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u16_u16)))
void svmop4a_1x1_za32_u16_u16(uint64_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s8_u8)))
void svmop4a_1x2_za32_s8_u8(uint64_t, svint8_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u8_s8)))
void svmop4a_1x2_za32_u8_s8(uint64_t, svuint8_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_bf16_bf16)))
void svmop4a_1x2_za32_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_f16_f16)))
void svmop4a_1x2_za32_f16_f16(uint64_t, svfloat16_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_f32_f32)))
void svmop4a_1x2_za32_f32_f32(uint64_t, svfloat32_t, svfloat32x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s8_s8)))
void svmop4a_1x2_za32_s8_s8(uint64_t, svint8_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s16_s16)))
void svmop4a_1x2_za32_s16_s16(uint64_t, svint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u8_u8)))
void svmop4a_1x2_za32_u8_u8(uint64_t, svuint8_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u16_u16)))
void svmop4a_1x2_za32_u16_u16(uint64_t, svuint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s8_u8)))
void svmop4a_2x1_za32_s8_u8(uint64_t, svint8x2_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u8_s8)))
void svmop4a_2x1_za32_u8_s8(uint64_t, svuint8x2_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_bf16_bf16)))
void svmop4a_2x1_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_f16_f16)))
void svmop4a_2x1_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_f32_f32)))
void svmop4a_2x1_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s8_s8)))
void svmop4a_2x1_za32_s8_s8(uint64_t, svint8x2_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s16_s16)))
void svmop4a_2x1_za32_s16_s16(uint64_t, svint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u8_u8)))
void svmop4a_2x1_za32_u8_u8(uint64_t, svuint8x2_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u16_u16)))
void svmop4a_2x1_za32_u16_u16(uint64_t, svuint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s8_u8)))
void svmop4a_2x2_za32_s8_u8(uint64_t, svint8x2_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u8_s8)))
void svmop4a_2x2_za32_u8_s8(uint64_t, svuint8x2_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_bf16_bf16)))
void svmop4a_2x2_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_f16_f16)))
void svmop4a_2x2_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_f32_f32)))
void svmop4a_2x2_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s8_s8)))
void svmop4a_2x2_za32_s8_s8(uint64_t, svint8x2_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s16_s16)))
void svmop4a_2x2_za32_s16_s16(uint64_t, svint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u8_u8)))
void svmop4a_2x2_za32_u8_u8(uint64_t, svuint8x2_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u16_u16)))
void svmop4a_2x2_za32_u16_u16(uint64_t, svuint16x2_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s8_u8)))
void svmop4s_1x1_za32_s8_u8(uint64_t, svint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u8_s8)))
void svmop4s_1x1_za32_u8_s8(uint64_t, svuint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_bf16_bf16)))
void svmop4s_1x1_za32_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_f16_f16)))
void svmop4s_1x1_za32_f16_f16(uint64_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_f32_f32)))
void svmop4s_1x1_za32_f32_f32(uint64_t, svfloat32_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s8_s8)))
void svmop4s_1x1_za32_s8_s8(uint64_t, svint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s16_s16)))
void svmop4s_1x1_za32_s16_s16(uint64_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u8_u8)))
void svmop4s_1x1_za32_u8_u8(uint64_t, svuint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u16_u16)))
void svmop4s_1x1_za32_u16_u16(uint64_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s8_u8)))
void svmop4s_1x2_za32_s8_u8(uint64_t, svint8_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u8_s8)))
void svmop4s_1x2_za32_u8_s8(uint64_t, svuint8_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_bf16_bf16)))
void svmop4s_1x2_za32_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_f16_f16)))
void svmop4s_1x2_za32_f16_f16(uint64_t, svfloat16_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_f32_f32)))
void svmop4s_1x2_za32_f32_f32(uint64_t, svfloat32_t, svfloat32x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s8_s8)))
void svmop4s_1x2_za32_s8_s8(uint64_t, svint8_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s16_s16)))
void svmop4s_1x2_za32_s16_s16(uint64_t, svint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u8_u8)))
void svmop4s_1x2_za32_u8_u8(uint64_t, svuint8_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u16_u16)))
void svmop4s_1x2_za32_u16_u16(uint64_t, svuint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s8_u8)))
void svmop4s_2x1_za32_s8_u8(uint64_t, svint8x2_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u8_s8)))
void svmop4s_2x1_za32_u8_s8(uint64_t, svuint8x2_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_bf16_bf16)))
void svmop4s_2x1_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_f16_f16)))
void svmop4s_2x1_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_f32_f32)))
void svmop4s_2x1_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s8_s8)))
void svmop4s_2x1_za32_s8_s8(uint64_t, svint8x2_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s16_s16)))
void svmop4s_2x1_za32_s16_s16(uint64_t, svint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u8_u8)))
void svmop4s_2x1_za32_u8_u8(uint64_t, svuint8x2_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u16_u16)))
void svmop4s_2x1_za32_u16_u16(uint64_t, svuint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s8_u8)))
void svmop4s_2x2_za32_s8_u8(uint64_t, svint8x2_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u8_s8)))
void svmop4s_2x2_za32_u8_s8(uint64_t, svuint8x2_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_bf16_bf16)))
void svmop4s_2x2_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_f16_f16)))
void svmop4s_2x2_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_f32_f32)))
void svmop4s_2x2_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s8_s8)))
void svmop4s_2x2_za32_s8_s8(uint64_t, svint8x2_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s16_s16)))
void svmop4s_2x2_za32_s16_s16(uint64_t, svint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u8_u8)))
void svmop4s_2x2_za32_u8_u8(uint64_t, svuint8x2_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u16_u16)))
void svmop4s_2x2_za32_u16_u16(uint64_t, svuint16x2_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s8_u8)))
void svmop4a_za32(uint64_t, svint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u8_s8)))
void svmop4a_za32(uint64_t, svuint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_bf16_bf16)))
void svmop4a_za32(uint64_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_f16_f16)))
void svmop4a_za32(uint64_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_f32_f32)))
void svmop4a_za32(uint64_t, svfloat32_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s8_s8)))
void svmop4a_za32(uint64_t, svint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s16_s16)))
void svmop4a_za32(uint64_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u8_u8)))
void svmop4a_za32(uint64_t, svuint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u16_u16)))
void svmop4a_za32(uint64_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s8_u8)))
void svmop4a_za32(uint64_t, svint8_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u8_s8)))
void svmop4a_za32(uint64_t, svuint8_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_bf16_bf16)))
void svmop4a_za32(uint64_t, svbfloat16_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_f16_f16)))
void svmop4a_za32(uint64_t, svfloat16_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_f32_f32)))
void svmop4a_za32(uint64_t, svfloat32_t, svfloat32x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s8_s8)))
void svmop4a_za32(uint64_t, svint8_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s16_s16)))
void svmop4a_za32(uint64_t, svint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u8_u8)))
void svmop4a_za32(uint64_t, svuint8_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u16_u16)))
void svmop4a_za32(uint64_t, svuint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s8_u8)))
void svmop4a_za32(uint64_t, svint8x2_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u8_s8)))
void svmop4a_za32(uint64_t, svuint8x2_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_bf16_bf16)))
void svmop4a_za32(uint64_t, svbfloat16x2_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_f16_f16)))
void svmop4a_za32(uint64_t, svfloat16x2_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_f32_f32)))
void svmop4a_za32(uint64_t, svfloat32x2_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s8_s8)))
void svmop4a_za32(uint64_t, svint8x2_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s16_s16)))
void svmop4a_za32(uint64_t, svint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u8_u8)))
void svmop4a_za32(uint64_t, svuint8x2_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u16_u16)))
void svmop4a_za32(uint64_t, svuint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s8_u8)))
void svmop4a_za32(uint64_t, svint8x2_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u8_s8)))
void svmop4a_za32(uint64_t, svuint8x2_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_bf16_bf16)))
void svmop4a_za32(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_f16_f16)))
void svmop4a_za32(uint64_t, svfloat16x2_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_f32_f32)))
void svmop4a_za32(uint64_t, svfloat32x2_t, svfloat32x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s8_s8)))
void svmop4a_za32(uint64_t, svint8x2_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s16_s16)))
void svmop4a_za32(uint64_t, svint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u8_u8)))
void svmop4a_za32(uint64_t, svuint8x2_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u16_u16)))
void svmop4a_za32(uint64_t, svuint16x2_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s8_u8)))
void svmop4s_za32(uint64_t, svint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u8_s8)))
void svmop4s_za32(uint64_t, svuint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_bf16_bf16)))
void svmop4s_za32(uint64_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_f16_f16)))
void svmop4s_za32(uint64_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_f32_f32)))
void svmop4s_za32(uint64_t, svfloat32_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s8_s8)))
void svmop4s_za32(uint64_t, svint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s16_s16)))
void svmop4s_za32(uint64_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u8_u8)))
void svmop4s_za32(uint64_t, svuint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u16_u16)))
void svmop4s_za32(uint64_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s8_u8)))
void svmop4s_za32(uint64_t, svint8_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u8_s8)))
void svmop4s_za32(uint64_t, svuint8_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_bf16_bf16)))
void svmop4s_za32(uint64_t, svbfloat16_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_f16_f16)))
void svmop4s_za32(uint64_t, svfloat16_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_f32_f32)))
void svmop4s_za32(uint64_t, svfloat32_t, svfloat32x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s8_s8)))
void svmop4s_za32(uint64_t, svint8_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s16_s16)))
void svmop4s_za32(uint64_t, svint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u8_u8)))
void svmop4s_za32(uint64_t, svuint8_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u16_u16)))
void svmop4s_za32(uint64_t, svuint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s8_u8)))
void svmop4s_za32(uint64_t, svint8x2_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u8_s8)))
void svmop4s_za32(uint64_t, svuint8x2_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_bf16_bf16)))
void svmop4s_za32(uint64_t, svbfloat16x2_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_f16_f16)))
void svmop4s_za32(uint64_t, svfloat16x2_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_f32_f32)))
void svmop4s_za32(uint64_t, svfloat32x2_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s8_s8)))
void svmop4s_za32(uint64_t, svint8x2_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s16_s16)))
void svmop4s_za32(uint64_t, svint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u8_u8)))
void svmop4s_za32(uint64_t, svuint8x2_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u16_u16)))
void svmop4s_za32(uint64_t, svuint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s8_u8)))
void svmop4s_za32(uint64_t, svint8x2_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u8_s8)))
void svmop4s_za32(uint64_t, svuint8x2_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_bf16_bf16)))
void svmop4s_za32(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_f16_f16)))
void svmop4s_za32(uint64_t, svfloat16x2_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_f32_f32)))
void svmop4s_za32(uint64_t, svfloat32x2_t, svfloat32x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s8_s8)))
void svmop4s_za32(uint64_t, svint8x2_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s16_s16)))
void svmop4s_za32(uint64_t, svint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u8_u8)))
void svmop4s_za32(uint64_t, svuint8x2_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u16_u16)))
void svmop4s_za32(uint64_t, svuint16x2_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_bf16_bf16)))
void svmop4a_1x1_za16_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_bf16_bf16)))
void svmop4a_1x2_za16_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_bf16_bf16)))
void svmop4a_2x1_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_bf16_bf16)))
void svmop4a_2x2_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za16_bf16_bf16)))
void svmop4s_1x1_za16_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za16_bf16_bf16)))
void svmop4s_1x2_za16_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za16_bf16_bf16)))
void svmop4s_2x1_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za16_bf16_bf16)))
void svmop4s_2x2_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_bf16_bf16)))
void svmop4a_za16(uint64_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_bf16_bf16)))
void svmop4a_za16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_bf16_bf16)))
void svmop4a_za16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_bf16_bf16)))
void svmop4a_za16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za16_bf16_bf16)))
void svmop4s_za16(uint64_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za16_bf16_bf16)))
void svmop4s_za16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za16_bf16_bf16)))
void svmop4s_za16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za16_bf16_bf16)))
void svmop4s_za16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_f16_f16)))
void svmop4a_1x1_za16_f16_f16(uint64_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_f16_f16)))
void svmop4a_1x2_za16_f16_f16(uint64_t, svfloat16_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_f16_f16)))
void svmop4a_2x1_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_f16_f16)))
void svmop4a_2x2_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za16_f16_f16)))
void svmop4s_1x1_za16_f16_f16(uint64_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za16_f16_f16)))
void svmop4s_1x2_za16_f16_f16(uint64_t, svfloat16_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za16_f16_f16)))
void svmop4s_2x1_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za16_f16_f16)))
void svmop4s_2x2_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_f16_f16)))
void svmop4a_za16(uint64_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_f16_f16)))
void svmop4a_za16(uint64_t, svfloat16_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_f16_f16)))
void svmop4a_za16(uint64_t, svfloat16x2_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_f16_f16)))
void svmop4a_za16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za16_f16_f16)))
void svmop4s_za16(uint64_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za16_f16_f16)))
void svmop4s_za16(uint64_t, svfloat16_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za16_f16_f16)))
void svmop4s_za16(uint64_t, svfloat16x2_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za16_f16_f16)))
void svmop4s_za16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_f64_f64)))
void svmop4a_1x1_za64_f64_f64(uint64_t, svfloat64_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_f64_f64)))
void svmop4a_1x2_za64_f64_f64(uint64_t, svfloat64_t, svfloat64x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_f64_f64)))
void svmop4a_2x1_za64_f64_f64(uint64_t, svfloat64x2_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_f64_f64)))
void svmop4a_2x2_za64_f64_f64(uint64_t, svfloat64x2_t, svfloat64x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_f64_f64)))
void svmop4s_1x1_za64_f64_f64(uint64_t, svfloat64_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_f64_f64)))
void svmop4s_1x2_za64_f64_f64(uint64_t, svfloat64_t, svfloat64x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_f64_f64)))
void svmop4s_2x1_za64_f64_f64(uint64_t, svfloat64x2_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_f64_f64)))
void svmop4s_2x2_za64_f64_f64(uint64_t, svfloat64x2_t, svfloat64x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_f64_f64)))
void svmop4a_za64(uint64_t, svfloat64_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_f64_f64)))
void svmop4a_za64(uint64_t, svfloat64_t, svfloat64x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_f64_f64)))
void svmop4a_za64(uint64_t, svfloat64x2_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_f64_f64)))
void svmop4a_za64(uint64_t, svfloat64x2_t, svfloat64x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_f64_f64)))
void svmop4s_za64(uint64_t, svfloat64_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_f64_f64)))
void svmop4s_za64(uint64_t, svfloat64_t, svfloat64x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_f64_f64)))
void svmop4s_za64(uint64_t, svfloat64x2_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_f64_f64)))
void svmop4s_za64(uint64_t, svfloat64x2_t, svfloat64x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_mf8_mf8_fpm)))
void svmop4a_1x1_za16_mf8_mf8_fpm(uint64_t, svmfloat8_t, svmfloat8_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_mf8_mf8_fpm)))
void svmop4a_1x2_za16_mf8_mf8_fpm(uint64_t, svmfloat8_t, svmfloat8x2_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_mf8_mf8_fpm)))
void svmop4a_2x1_za16_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_mf8_mf8_fpm)))
void svmop4a_2x2_za16_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8x2_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_mf8_mf8_fpm)))
void svmop4a_za16_fpm(uint64_t, svmfloat8_t, svmfloat8_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_mf8_mf8_fpm)))
void svmop4a_za16_fpm(uint64_t, svmfloat8_t, svmfloat8x2_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_mf8_mf8_fpm)))
void svmop4a_za16_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_mf8_mf8_fpm)))
void svmop4a_za16_fpm(uint64_t, svmfloat8x2_t, svmfloat8x2_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_mf8_mf8_fpm)))
void svmop4a_1x1_za32_mf8_mf8_fpm(uint64_t, svmfloat8_t, svmfloat8_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_mf8_mf8_fpm)))
void svmop4a_1x2_za32_mf8_mf8_fpm(uint64_t, svmfloat8_t, svmfloat8x2_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_mf8_mf8_fpm)))
void svmop4a_2x1_za32_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_mf8_mf8_fpm)))
void svmop4a_2x2_za32_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8x2_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_mf8_mf8_fpm)))
void svmop4a_za32_fpm(uint64_t, svmfloat8_t, svmfloat8_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_mf8_mf8_fpm)))
void svmop4a_za32_fpm(uint64_t, svmfloat8_t, svmfloat8x2_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_mf8_mf8_fpm)))
void svmop4a_za32_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_mf8_mf8_fpm)))
void svmop4a_za32_fpm(uint64_t, svmfloat8x2_t, svmfloat8x2_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_s16_u16)))
void svmop4a_1x1_za64_s16_u16(uint64_t, svint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_u16_s16)))
void svmop4a_1x1_za64_u16_s16(uint64_t, svuint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_s16_s16)))
void svmop4a_1x1_za64_s16_s16(uint64_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_u16_u16)))
void svmop4a_1x1_za64_u16_u16(uint64_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_s16_u16)))
void svmop4a_1x2_za64_s16_u16(uint64_t, svint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_u16_s16)))
void svmop4a_1x2_za64_u16_s16(uint64_t, svuint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_s16_s16)))
void svmop4a_1x2_za64_s16_s16(uint64_t, svint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_u16_u16)))
void svmop4a_1x2_za64_u16_u16(uint64_t, svuint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_s16_u16)))
void svmop4a_2x1_za64_s16_u16(uint64_t, svint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_u16_s16)))
void svmop4a_2x1_za64_u16_s16(uint64_t, svuint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_s16_s16)))
void svmop4a_2x1_za64_s16_s16(uint64_t, svint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_u16_u16)))
void svmop4a_2x1_za64_u16_u16(uint64_t, svuint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_s16_u16)))
void svmop4a_2x2_za64_s16_u16(uint64_t, svint16x2_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_u16_s16)))
void svmop4a_2x2_za64_u16_s16(uint64_t, svuint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_s16_s16)))
void svmop4a_2x2_za64_s16_s16(uint64_t, svint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_u16_u16)))
void svmop4a_2x2_za64_u16_u16(uint64_t, svuint16x2_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_s16_u16)))
void svmop4s_1x1_za64_s16_u16(uint64_t, svint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_u16_s16)))
void svmop4s_1x1_za64_u16_s16(uint64_t, svuint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_s16_s16)))
void svmop4s_1x1_za64_s16_s16(uint64_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_u16_u16)))
void svmop4s_1x1_za64_u16_u16(uint64_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_s16_u16)))
void svmop4s_1x2_za64_s16_u16(uint64_t, svint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_u16_s16)))
void svmop4s_1x2_za64_u16_s16(uint64_t, svuint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_s16_s16)))
void svmop4s_1x2_za64_s16_s16(uint64_t, svint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_u16_u16)))
void svmop4s_1x2_za64_u16_u16(uint64_t, svuint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_s16_u16)))
void svmop4s_2x1_za64_s16_u16(uint64_t, svint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_u16_s16)))
void svmop4s_2x1_za64_u16_s16(uint64_t, svuint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_s16_s16)))
void svmop4s_2x1_za64_s16_s16(uint64_t, svint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_u16_u16)))
void svmop4s_2x1_za64_u16_u16(uint64_t, svuint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_s16_u16)))
void svmop4s_2x2_za64_s16_u16(uint64_t, svint16x2_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_u16_s16)))
void svmop4s_2x2_za64_u16_s16(uint64_t, svuint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_s16_s16)))
void svmop4s_2x2_za64_s16_s16(uint64_t, svint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_u16_u16)))
void svmop4s_2x2_za64_u16_u16(uint64_t, svuint16x2_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_s16_u16)))
void svmop4a_za64(uint64_t, svint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_u16_s16)))
void svmop4a_za64(uint64_t, svuint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_s16_s16)))
void svmop4a_za64(uint64_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_u16_u16)))
void svmop4a_za64(uint64_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_s16_u16)))
void svmop4a_za64(uint64_t, svint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_u16_s16)))
void svmop4a_za64(uint64_t, svuint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_s16_s16)))
void svmop4a_za64(uint64_t, svint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_u16_u16)))
void svmop4a_za64(uint64_t, svuint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_s16_u16)))
void svmop4a_za64(uint64_t, svint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_u16_s16)))
void svmop4a_za64(uint64_t, svuint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_s16_s16)))
void svmop4a_za64(uint64_t, svint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_u16_u16)))
void svmop4a_za64(uint64_t, svuint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_s16_u16)))
void svmop4a_za64(uint64_t, svint16x2_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_u16_s16)))
void svmop4a_za64(uint64_t, svuint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_s16_s16)))
void svmop4a_za64(uint64_t, svint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_u16_u16)))
void svmop4a_za64(uint64_t, svuint16x2_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_s16_u16)))
void svmop4s_za64(uint64_t, svint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_u16_s16)))
void svmop4s_za64(uint64_t, svuint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_s16_s16)))
void svmop4s_za64(uint64_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_u16_u16)))
void svmop4s_za64(uint64_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_s16_u16)))
void svmop4s_za64(uint64_t, svint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_u16_s16)))
void svmop4s_za64(uint64_t, svuint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_s16_s16)))
void svmop4s_za64(uint64_t, svint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_u16_u16)))
void svmop4s_za64(uint64_t, svuint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_s16_u16)))
void svmop4s_za64(uint64_t, svint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_u16_s16)))
void svmop4s_za64(uint64_t, svuint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_s16_s16)))
void svmop4s_za64(uint64_t, svint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_u16_u16)))
void svmop4s_za64(uint64_t, svuint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_s16_u16)))
void svmop4s_za64(uint64_t, svint16x2_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_u16_s16)))
void svmop4s_za64(uint64_t, svuint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_s16_s16)))
void svmop4s_za64(uint64_t, svint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_u16_u16)))
void svmop4s_za64(uint64_t, svuint16x2_t, svuint16x2_t);
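The __aio declarations above make svmop4a_za64/svmop4s_za64 overloadable on argument types. A minimal usage sketch (not from this commit), assuming an AArch64 toolchain with the SME MOP4 and I16I64 extensions; acc_tile0 is a hypothetical caller:

#include <arm_sme.h>

void acc_tile0(svint16_t zn, svint16x2_t zm)
    __arm_streaming __arm_inout("za") {
  // Single vector x vector pair selects the 1x2 signed variant, i.e.
  // __builtin_sme_svmop4a_1x2_za64_s16_s16; the first argument is the
  // ZA tile index and must be a constant expression.
  svmop4a_za64(0, zn, zm);
}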
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s8_u8)))
void svtmopa_lane_za32_s8_u8(uint64_t, svint8x2_t, svuint8_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u8_s8)))
void svtmopa_lane_za32_u8_s8(uint64_t, svuint8x2_t, svint8_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s8_s8)))
void svtmopa_lane_za32_s8_s8(uint64_t, svint8x2_t, svint8_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s16_s16)))
void svtmopa_lane_za32_s16_s16(uint64_t, svint16x2_t, svint16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_bf16_bf16)))
void svtmopa_lane_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_f32_f32)))
void svtmopa_lane_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_f16_f16)))
void svtmopa_lane_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u8_u8)))
void svtmopa_lane_za32_u8_u8(uint64_t, svuint8x2_t, svuint8_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u16_u16)))
void svtmopa_lane_za32_u16_u16(uint64_t, svuint16x2_t, svuint16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s8_u8)))
void svtmopa_lane_za32(uint64_t, svint8x2_t, svuint8_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u8_s8)))
void svtmopa_lane_za32(uint64_t, svuint8x2_t, svint8_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s8_s8)))
void svtmopa_lane_za32(uint64_t, svint8x2_t, svint8_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s16_s16)))
void svtmopa_lane_za32(uint64_t, svint16x2_t, svint16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_bf16_bf16)))
void svtmopa_lane_za32(uint64_t, svbfloat16x2_t, svbfloat16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_f32_f32)))
void svtmopa_lane_za32(uint64_t, svfloat32x2_t, svfloat32_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_f16_f16)))
void svtmopa_lane_za32(uint64_t, svfloat16x2_t, svfloat16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u8_u8)))
void svtmopa_lane_za32(uint64_t, svuint8x2_t, svuint8_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u16_u16)))
void svtmopa_lane_za32(uint64_t, svuint16x2_t, svuint16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_bf16_bf16)))
void svtmopa_lane_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_bf16_bf16)))
void svtmopa_lane_za16(uint64_t, svbfloat16x2_t, svbfloat16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_f16_f16)))
void svtmopa_lane_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_f16_f16)))
void svtmopa_lane_za16(uint64_t, svfloat16x2_t, svfloat16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_mf8_mf8_fpm)))
void svtmopa_lane_za16_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, svuint8_t, uint64_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_mf8_mf8_fpm)))
void svtmopa_lane_za16_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, svuint8_t, uint64_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_mf8_mf8_fpm)))
void svtmopa_lane_za32_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, svuint8_t, uint64_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_mf8_mf8_fpm)))
void svtmopa_lane_za32_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, svuint8_t, uint64_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u8)))
svuint8_t svreadz_hor_za128_u8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u32)))
@@ -2804,6 +3478,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s3
svint32_t svreadz_hor_za128_s32(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s64)))
svint64_t svreadz_hor_za128_s64(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_mf8)))
svmfloat8_t svreadz_hor_za128_mf8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s16)))
svint16_t svreadz_hor_za128_s16(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16)))
@@ -2870,14 +3546,20 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8))
svuint8_t svreadz_hor_za8_u8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8)))
svint8_t svreadz_hor_za8_s8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_mf8)))
svmfloat8_t svreadz_hor_za8_mf8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg2)))
svuint8x2_t svreadz_hor_za8_u8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg2)))
svint8x2_t svreadz_hor_za8_s8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_mf8_vg2)))
svmfloat8x2_t svreadz_hor_za8_mf8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg4)))
svuint8x4_t svreadz_hor_za8_u8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg4)))
svint8x4_t svreadz_hor_za8_s8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_mf8_vg4)))
svmfloat8x4_t svreadz_hor_za8_mf8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u8)))
svuint8_t svreadz_ver_za128_u8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u32)))
@@ -2900,6 +3582,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s3
svint32_t svreadz_ver_za128_s32(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s64)))
svint64_t svreadz_ver_za128_s64(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_mf8)))
svmfloat8_t svreadz_ver_za128_mf8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s16)))
svint16_t svreadz_ver_za128_s16(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16)))
@@ -2966,14 +3650,20 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8))
svuint8_t svreadz_ver_za8_u8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8)))
svint8_t svreadz_ver_za8_s8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_mf8)))
svmfloat8_t svreadz_ver_za8_mf8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg2)))
svuint8x2_t svreadz_ver_za8_u8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg2)))
svint8x2_t svreadz_ver_za8_s8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_mf8_vg2)))
svmfloat8x2_t svreadz_ver_za8_mf8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg4)))
svuint8x4_t svreadz_ver_za8_u8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg4)))
svint8x4_t svreadz_ver_za8_s8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_mf8_vg4)))
svmfloat8x4_t svreadz_ver_za8_mf8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_u16_vg1x2)))
svuint16x2_t svreadz_za16_u16_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_bf16_vg1x2)))
@@ -3018,10 +3708,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x2
svuint8x2_t svreadz_za8_u8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x2)))
svint8x2_t svreadz_za8_s8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_mf8_vg1x2)))
svmfloat8x2_t svreadz_za8_mf8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x4)))
svuint8x4_t svreadz_za8_u8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x4)))
svint8x4_t svreadz_za8_s8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_mf8_vg1x4)))
svmfloat8x4_t svreadz_za8_mf8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x2)))
void svzero_za64_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x4)))
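The new mf8 entry points follow the same move-and-zero pattern as the existing u8/s8 ones. A hedged sketch, assuming SME2.1 with FP8 vector types; drain_rows is a hypothetical helper:

#include <arm_sme.h>

svmfloat8x2_t drain_rows(uint32_t slice)
    __arm_streaming __arm_inout("za") {
  // Returns two vector groups of FP8 data from ZA and zeroes the source.
  return svreadz_za8_mf8_vg1x2(slice);
}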

4540
lib/include/arm_sve.h vendored

File diff suppressed because it is too large

@@ -441,7 +441,7 @@ _mm512_maskz_sqrt_pbh(__mmask32 __U, __m512bh __A) {
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
return (__m512bh)__builtin_ia32_vfmaddnepbh512((__v32bf)__A, (__v32bf)__B,
return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
(__v32bf)__C);
}
@@ -469,7 +469,7 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fmadd_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
return (__m512bh)__builtin_ia32_vfmaddnepbh512((__v32bf)__A, (__v32bf)__B,
return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
-(__v32bf)__C);
}
@@ -497,7 +497,7 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fmsub_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fnmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
return (__m512bh)__builtin_ia32_vfmaddnepbh512((__v32bf)__A, -(__v32bf)__B,
return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
(__v32bf)__C);
}
@@ -527,7 +527,7 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fnmadd_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fnmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
return (__m512bh)__builtin_ia32_vfmaddnepbh512((__v32bf)__A, -(__v32bf)__B,
return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
-(__v32bf)__C);
}
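The rename from __builtin_ia32_vfmaddnepbh512 to __builtin_ia32_vfmaddbf16512 is mechanical: all four fused forms still funnel into one builtin, with fmsub/fnmadd/fnmsub negating operands. A usage sketch, assuming a compiler with AVX10.2 512-bit BF16 support; mac_bf16 is a hypothetical wrapper:

#include <immintrin.h>

__m512bh mac_bf16(__m512bh x, __m512bh y, __m512bh acc) {
  // x*y + acc in a single fused step on 32 BF16 lanes.
  return _mm512_fmadd_pbh(x, y, acc);
}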


@@ -78,20 +78,20 @@ _mm512_maskz_cvtbiasph_bf8(__mmask32 __U, __m512i __A, __m512h __B) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtbiassph_bf8(__m512i __A, __m512h __B) {
_mm512_cvts_biasph_bf8(__m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2bf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)_mm256_undefined_si256(),
(__mmask32)-1);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtbiassph_bf8(
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvts_biasph_bf8(
__m256i __W, __mmask32 __U, __m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2bf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)__W, (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtbiassph_bf8(__mmask32 __U, __m512i __A, __m512h __B) {
_mm512_maskz_cvts_biasph_bf8(__mmask32 __U, __m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2bf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)_mm256_setzero_si256(),
(__mmask32)__U);
@@ -118,20 +118,20 @@ _mm512_maskz_cvtbiasph_hf8(__mmask32 __U, __m512i __A, __m512h __B) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtbiassph_hf8(__m512i __A, __m512h __B) {
_mm512_cvts_biasph_hf8(__m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2hf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)_mm256_undefined_si256(),
(__mmask32)-1);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtbiassph_hf8(
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvts_biasph_hf8(
__m256i __W, __mmask32 __U, __m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2hf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)__W, (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtbiassph_hf8(__mmask32 __U, __m512i __A, __m512h __B) {
_mm512_maskz_cvts_biasph_hf8(__mmask32 __U, __m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2hf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)_mm256_setzero_si256(),
(__mmask32)__U);
@@ -157,21 +157,21 @@ _mm512_maskz_cvt2ph_bf8(__mmask64 __U, __m512h __A, __m512h __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvts2ph_bf8(__m512h __A, __m512h __B) {
_mm512_cvts_2ph_bf8(__m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_vcvt2ph2bf8s_512((__v32hf)(__A),
(__v32hf)(__B));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvts2ph_bf8(__m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
_mm512_mask_cvts_2ph_bf8(__m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_selectb_512(
(__mmask64)__U, (__v64qi)_mm512_cvts2ph_bf8(__A, __B), (__v64qi)__W);
(__mmask64)__U, (__v64qi)_mm512_cvts_2ph_bf8(__A, __B), (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvts2ph_bf8(__mmask64 __U, __m512h __A, __m512h __B) {
_mm512_maskz_cvts_2ph_bf8(__mmask64 __U, __m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_selectb_512(
(__mmask64)__U, (__v64qi)_mm512_cvts2ph_bf8(__A, __B),
(__mmask64)__U, (__v64qi)_mm512_cvts_2ph_bf8(__A, __B),
(__v64qi)(__m512i)_mm512_setzero_si512());
}
@@ -195,37 +195,37 @@ _mm512_maskz_cvt2ph_hf8(__mmask64 __U, __m512h __A, __m512h __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvts2ph_hf8(__m512h __A, __m512h __B) {
_mm512_cvts_2ph_hf8(__m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_vcvt2ph2hf8s_512((__v32hf)(__A),
(__v32hf)(__B));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvts2ph_hf8(__m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
_mm512_mask_cvts_2ph_hf8(__m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_selectb_512(
(__mmask64)__U, (__v64qi)_mm512_cvts2ph_hf8(__A, __B), (__v64qi)__W);
(__mmask64)__U, (__v64qi)_mm512_cvts_2ph_hf8(__A, __B), (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvts2ph_hf8(__mmask64 __U, __m512h __A, __m512h __B) {
_mm512_maskz_cvts_2ph_hf8(__mmask64 __U, __m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_selectb_512(
(__mmask64)__U, (__v64qi)_mm512_cvts2ph_hf8(__A, __B),
(__mmask64)__U, (__v64qi)_mm512_cvts_2ph_hf8(__A, __B),
(__v64qi)(__m512i)_mm512_setzero_si512());
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_cvthf8(__m256i __A) {
static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_cvthf8_ph(__m256i __A) {
return (__m512h)__builtin_ia32_vcvthf8_2ph512_mask(
(__v32qi)__A, (__v32hf)(__m512h)_mm512_undefined_ph(), (__mmask32)-1);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_mask_cvthf8(__m512h __W, __mmask32 __U, __m256i __A) {
_mm512_mask_cvthf8_ph(__m512h __W, __mmask32 __U, __m256i __A) {
return (__m512h)__builtin_ia32_vcvthf8_2ph512_mask(
(__v32qi)__A, (__v32hf)(__m512h)__W, (__mmask32)__U);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_maskz_cvthf8(__mmask32 __U, __m256i __A) {
_mm512_maskz_cvthf8_ph(__mmask32 __U, __m256i __A) {
return (__m512h)__builtin_ia32_vcvthf8_2ph512_mask(
(__v32qi)__A, (__v32hf)(__m512h)_mm512_setzero_ph(), (__mmask32)__U);
}
@@ -247,19 +247,20 @@ _mm512_maskz_cvtph_bf8(__mmask32 __U, __m512h __A) {
(__v32hf)__A, (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_cvtsph_bf8(__m512h __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvts_ph_bf8(__m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2bf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)_mm256_undefined_si256(), (__mmask32)-1);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsph_bf8(__m256i __W, __mmask32 __U, __m512h __A) {
_mm512_mask_cvts_ph_bf8(__m256i __W, __mmask32 __U, __m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2bf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)__W, (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsph_bf8(__mmask32 __U, __m512h __A) {
_mm512_maskz_cvts_ph_bf8(__mmask32 __U, __m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2bf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)__U);
}
@@ -281,19 +282,20 @@ _mm512_maskz_cvtph_hf8(__mmask32 __U, __m512h __A) {
(__v32hf)__A, (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_cvtsph_hf8(__m512h __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvts_ph_hf8(__m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2hf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)_mm256_undefined_si256(), (__mmask32)-1);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsph_hf8(__m256i __W, __mmask32 __U, __m512h __A) {
_mm512_mask_cvts_ph_hf8(__m256i __W, __mmask32 __U, __m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2hf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)__W, (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsph_hf8(__mmask32 __U, __m512h __A) {
_mm512_maskz_cvts_ph_hf8(__mmask32 __U, __m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2hf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)__U);
}
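The saturating converts gain an underscore in their names (_mm512_cvtsph_hf8 becomes _mm512_cvts_ph_hf8, _mm512_cvthf8 becomes _mm512_cvthf8_ph). A sketch under the new spellings, assuming AVX10.2-512 support; through_hf8 is hypothetical:

#include <immintrin.h>

__m512h through_hf8(__m512h v) {
  __m256i packed = _mm512_cvts_ph_hf8(v); // saturating fp16 -> hf8
  return _mm512_cvthf8_ph(packed);        // widen hf8 back to fp16
}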


@@ -20,20 +20,21 @@
__min_vector_width__(512)))
// 512 bit : Double -> Int
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvttspd_epi32(__m512d __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_cvtts_pd_epi32(__m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask(
(__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_mask_cvttspd_epi32(__m256i __W, __mmask8 __U, __m512d __A) {
_mm512_mask_cvtts_pd_epi32(__m256i __W, __mmask8 __U, __m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask(
(__v8df)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttspd_epi32(__mmask8 __U, __m512d __A) {
_mm512_maskz_cvtts_pd_epi32(__mmask8 __U, __m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask(
(__v8df)__A, (__v8si)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
@@ -55,20 +56,21 @@ _mm512_maskz_cvttspd_epi32(__mmask8 __U, __m512d __A) {
(const int)(__R)))
// 512 bit : Double -> uInt
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvttspd_epu32(__m512d __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_cvtts_pd_epu32(__m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask(
(__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_mask_cvttspd_epu32(__m256i __W, __mmask8 __U, __m512d __A) {
_mm512_mask_cvtts_pd_epu32(__m256i __W, __mmask8 __U, __m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask(
(__v8df)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttspd_epu32(__mmask8 __U, __m512d __A) {
_mm512_maskz_cvtts_pd_epu32(__mmask8 __U, __m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask(
(__v8df)__A, (__v8si)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
@@ -91,18 +93,19 @@ _mm512_maskz_cvttspd_epu32(__mmask8 __U, __m512d __A) {
// 512 bit : Double -> Long
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttspd_epi64(__m512d __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvtts_pd_epi64(__m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask(
(__v8df)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttspd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
_mm512_mask_cvtts_pd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask(
(__v8df)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttspd_epi64(__mmask8 __U, __m512d __A) {
_mm512_maskz_cvtts_pd_epi64(__mmask8 __U, __m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask(
(__v8df)__A, (__v8di)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@@ -125,20 +128,21 @@ _mm512_maskz_cvttspd_epi64(__mmask8 __U, __m512d __A) {
// 512 bit : Double -> ULong
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttspd_epu64(__m512d __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvtts_pd_epu64(__m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask(
(__v8df)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttspd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
_mm512_mask_cvtts_pd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask(
(__v8df)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttspd_epu64(__mmask8 __U, __m512d __A) {
_mm512_maskz_cvtts_pd_epu64(__mmask8 __U, __m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask(
(__v8df)__A, (__v8di)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@@ -160,20 +164,20 @@ _mm512_maskz_cvttspd_epu64(__mmask8 __U, __m512d __A) {
(const int)(__R)))
// 512 bit: Float -> int
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epi32(__m512 __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtts_ps_epi32(__m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask(
(__v16sf)(__A), (__v16si)_mm512_undefined_epi32(), (__mmask16)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttsps_epi32(__m512i __W, __mmask16 __U, __m512 __A) {
_mm512_mask_cvtts_ps_epi32(__m512i __W, __mmask16 __U, __m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask(
(__v16sf)(__A), (__v16si)(__W), __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttsps_epi32(__mmask16 __U, __m512 __A) {
_mm512_maskz_cvtts_ps_epi32(__mmask16 __U, __m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask(
(__v16sf)(__A), (__v16si)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@@ -195,20 +199,20 @@ _mm512_maskz_cvttsps_epi32(__mmask16 __U, __m512 __A) {
(__mmask16)(__U), (const int)(__R)))
// 512 bit: Float -> uint
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epu32(__m512 __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtts_ps_epu32(__m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask(
(__v16sf)(__A), (__v16si)_mm512_undefined_epi32(), (__mmask16)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttsps_epu32(__m512i __W, __mmask16 __U, __m512 __A) {
_mm512_mask_cvtts_ps_epu32(__m512i __W, __mmask16 __U, __m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask(
(__v16sf)(__A), (__v16si)(__W), __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttsps_epu32(__mmask16 __U, __m512 __A) {
_mm512_maskz_cvtts_ps_epu32(__mmask16 __U, __m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask(
(__v16sf)(__A), (__v16si)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@@ -230,20 +234,20 @@ _mm512_maskz_cvttsps_epu32(__mmask16 __U, __m512 __A) {
(__mmask16)(__U), (const int)(__R)))
// 512 bit : float -> long
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epi64(__m256 __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtts_ps_epi64(__m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask(
(__v8sf)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttsps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
_mm512_mask_cvtts_ps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask(
(__v8sf)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttsps_epi64(__mmask8 __U, __m256 __A) {
_mm512_maskz_cvtts_ps_epi64(__mmask8 __U, __m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask(
(__v8sf)__A, (__v8di)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@@ -265,20 +269,20 @@ _mm512_maskz_cvttsps_epi64(__mmask8 __U, __m256 __A) {
(const int)(__R)))
// 512 bit : float -> ulong
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epu64(__m256 __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtts_ps_epu64(__m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask(
(__v8sf)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttsps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
_mm512_mask_cvtts_ps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask(
(__v8sf)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttsps_epu64(__mmask8 __U, __m256 __A) {
_mm512_maskz_cvtts_ps_epu64(__mmask8 __U, __m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask(
(__v8sf)__A, (__v8di)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
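Likewise the truncating-with-saturation converts move from the cvttspd/cvttsps spellings to cvtts_pd/cvtts_ps. A hedged sketch, assuming AVX10.2-512; to_i32_sat is a hypothetical caller:

#include <immintrin.h>

__m256i to_i32_sat(__m512d v) {
  // Truncates toward zero; out-of-range values saturate instead of
  // producing the x86 "integer indefinite" pattern.
  return _mm512_cvtts_pd_epi32(v); // was _mm512_cvttspd_epi32
}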


@@ -14,286 +14,286 @@
#ifndef __AVX10_2_512SATCVTINTRIN_H
#define __AVX10_2_512SATCVTINTRIN_H
#define _mm512_ipcvtbf16_epi8(A) \
#define _mm512_ipcvts_bf16_epi8(A) \
((__m512i)__builtin_ia32_vcvtbf162ibs512((__v32bf)(__m512bh)(A)))
#define _mm512_mask_ipcvtbf16_epi8(W, U, A) \
#define _mm512_mask_ipcvts_bf16_epi8(W, U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvtbf16_epi8(A), \
(__v32hi)_mm512_ipcvts_bf16_epi8(A), \
(__v32hi)(__m512i)(W)))
#define _mm512_maskz_ipcvtbf16_epi8(U, A) \
#define _mm512_maskz_ipcvts_bf16_epi8(U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvtbf16_epi8(A), \
(__v32hi)_mm512_ipcvts_bf16_epi8(A), \
(__v32hi)_mm512_setzero_si512()))
#define _mm512_ipcvtbf16_epu8(A) \
#define _mm512_ipcvts_bf16_epu8(A) \
((__m512i)__builtin_ia32_vcvtbf162iubs512((__v32bf)(__m512bh)(A)))
#define _mm512_mask_ipcvtbf16_epu8(W, U, A) \
#define _mm512_mask_ipcvts_bf16_epu8(W, U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvtbf16_epu8(A), \
(__v32hi)_mm512_ipcvts_bf16_epu8(A), \
(__v32hi)(__m512i)(W)))
#define _mm512_maskz_ipcvtbf16_epu8(U, A) \
#define _mm512_maskz_ipcvts_bf16_epu8(U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvtbf16_epu8(A), \
(__v32hi)_mm512_ipcvts_bf16_epu8(A), \
(__v32hi)_mm512_setzero_si512()))
#define _mm512_ipcvttbf16_epi8(A) \
#define _mm512_ipcvtts_bf16_epi8(A) \
((__m512i)__builtin_ia32_vcvttbf162ibs512((__v32bf)(__m512bh)(A)))
#define _mm512_mask_ipcvttbf16_epi8(W, U, A) \
#define _mm512_mask_ipcvtts_bf16_epi8(W, U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvttbf16_epi8(A), \
(__v32hi)_mm512_ipcvtts_bf16_epi8(A), \
(__v32hi)(__m512i)(W)))
#define _mm512_maskz_ipcvttbf16_epi8(U, A) \
#define _mm512_maskz_ipcvtts_bf16_epi8(U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvttbf16_epi8(A), \
(__v32hi)_mm512_ipcvtts_bf16_epi8(A), \
(__v32hi)_mm512_setzero_si512()))
#define _mm512_ipcvttbf16_epu8(A) \
#define _mm512_ipcvtts_bf16_epu8(A) \
((__m512i)__builtin_ia32_vcvttbf162iubs512((__v32bf)(__m512bh)(A)))
#define _mm512_mask_ipcvttbf16_epu8(W, U, A) \
#define _mm512_mask_ipcvtts_bf16_epu8(W, U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvttbf16_epu8(A), \
(__v32hi)_mm512_ipcvtts_bf16_epu8(A), \
(__v32hi)(__m512i)(W)))
#define _mm512_maskz_ipcvttbf16_epu8(U, A) \
#define _mm512_maskz_ipcvtts_bf16_epu8(U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvttbf16_epu8(A), \
(__v32hi)_mm512_ipcvtts_bf16_epu8(A), \
(__v32hi)_mm512_setzero_si512()))
#define _mm512_ipcvtph_epi8(A) \
#define _mm512_ipcvts_ph_epi8(A) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvtph_epi8(W, U, A) \
#define _mm512_mask_ipcvts_ph_epi8(W, U, A) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)(W), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvtph_epi8(U, A) \
#define _mm512_maskz_ipcvts_ph_epi8(U, A) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvt_roundph_epi8(A, R) \
#define _mm512_ipcvts_roundph_epi8(A, R) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)_mm512_setzero_si512(), \
(__mmask32) - 1, (const int)R))
#define _mm512_mask_ipcvt_roundph_epi8(W, U, A, R) \
#define _mm512_mask_ipcvts_roundph_epi8(W, U, A, R) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), (const int)R))
#define _mm512_maskz_ipcvt_roundph_epi8(U, A, R) \
#define _mm512_maskz_ipcvts_roundph_epi8(U, A, R) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)_mm512_setzero_si512(), \
(__mmask32)(U), (const int)R))
#define _mm512_ipcvtph_epu8(A) \
#define _mm512_ipcvts_ph_epu8(A) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvtph_epu8(W, U, A) \
#define _mm512_mask_ipcvts_ph_epu8(W, U, A) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)(W), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvtph_epu8(U, A) \
#define _mm512_maskz_ipcvts_ph_epu8(U, A) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvt_roundph_epu8(A, R) \
#define _mm512_ipcvts_roundph_epu8(A, R) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
(const int)R))
#define _mm512_mask_ipcvt_roundph_epu8(W, U, A, R) \
#define _mm512_mask_ipcvts_roundph_epu8(W, U, A, R) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), (const int)R))
#define _mm512_maskz_ipcvt_roundph_epu8(U, A, R) \
#define _mm512_maskz_ipcvts_roundph_epu8(U, A, R) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
(const int)R))
#define _mm512_ipcvtps_epi8(A) \
#define _mm512_ipcvts_ps_epi8(A) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvtps_epi8(W, U, A) \
#define _mm512_mask_ipcvts_ps_epi8(W, U, A) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask((__v16sf)(__m512)(A), \
(__v16su)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvtps_epi8(U, A) \
#define _mm512_maskz_ipcvts_ps_epi8(U, A) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvt_roundps_epi8(A, R) \
#define _mm512_ipcvts_roundps_epi8(A, R) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask((__v16sf)(__m512)(A), \
(__v16su)_mm512_setzero_si512(), \
(__mmask16) - 1, (const int)R))
#define _mm512_mask_ipcvt_roundps_epi8(W, U, A, R) \
#define _mm512_mask_ipcvts_roundps_epi8(W, U, A, R) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask( \
(__v16sf)(__m512)(A), (__v16su)(W), (__mmask16)(U), (const int)R))
#define _mm512_maskz_ipcvt_roundps_epi8(U, A, R) \
#define _mm512_maskz_ipcvts_roundps_epi8(U, A, R) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask((__v16sf)(__m512)(A), \
(__v16su)_mm512_setzero_si512(), \
(__mmask16)(U), (const int)R))
#define _mm512_ipcvtps_epu8(A) \
#define _mm512_ipcvts_ps_epu8(A) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvtps_epu8(W, U, A) \
#define _mm512_mask_ipcvts_ps_epu8(W, U, A) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask((__v16sf)(__m512)(A), \
(__v16su)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvtps_epu8(U, A) \
#define _mm512_maskz_ipcvts_ps_epu8(U, A) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvt_roundps_epu8(A, R) \
#define _mm512_ipcvts_roundps_epu8(A, R) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
(const int)R))
#define _mm512_mask_ipcvt_roundps_epu8(W, U, A, R) \
#define _mm512_mask_ipcvts_roundps_epu8(W, U, A, R) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)(W), (__mmask16)(U), (const int)R))
#define _mm512_maskz_ipcvt_roundps_epu8(U, A, R) \
#define _mm512_maskz_ipcvts_roundps_epu8(U, A, R) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
(const int)R))
#define _mm512_ipcvttph_epi8(A) \
#define _mm512_ipcvtts_ph_epi8(A) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvttph_epi8(W, U, A) \
#define _mm512_mask_ipcvtts_ph_epi8(W, U, A) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)(W), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvttph_epi8(U, A) \
#define _mm512_maskz_ipcvtts_ph_epi8(U, A) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvtt_roundph_epi8(A, S) \
#define _mm512_ipcvtts_roundph_epi8(A, S) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
S))
#define _mm512_mask_ipcvtt_roundph_epi8(W, U, A, S) \
#define _mm512_mask_ipcvtts_roundph_epi8(W, U, A, S) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), S))
#define _mm512_maskz_ipcvtt_roundph_epi8(U, A, S) \
#define _mm512_maskz_ipcvtts_roundph_epi8(U, A, S) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
S))
#define _mm512_ipcvttph_epu8(A) \
#define _mm512_ipcvtts_ph_epu8(A) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvttph_epu8(W, U, A) \
#define _mm512_mask_ipcvtts_ph_epu8(W, U, A) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)(W), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvttph_epu8(U, A) \
#define _mm512_maskz_ipcvtts_ph_epu8(U, A) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvtt_roundph_epu8(A, S) \
#define _mm512_ipcvtts_roundph_epu8(A, S) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
S))
#define _mm512_mask_ipcvtt_roundph_epu8(W, U, A, S) \
#define _mm512_mask_ipcvtts_roundph_epu8(W, U, A, S) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), S))
#define _mm512_maskz_ipcvtt_roundph_epu8(U, A, S) \
#define _mm512_maskz_ipcvtts_roundph_epu8(U, A, S) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
S))
#define _mm512_ipcvttps_epi8(A) \
#define _mm512_ipcvtts_ps_epi8(A) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvttps_epi8(W, U, A) \
#define _mm512_mask_ipcvtts_ps_epi8(W, U, A) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask((__v16sf)(__m512h)(A), \
(__v16su)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvttps_epi8(U, A) \
#define _mm512_maskz_ipcvtts_ps_epi8(U, A) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvtt_roundps_epi8(A, S) \
#define _mm512_ipcvtts_roundps_epi8(A, S) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
S))
#define _mm512_mask_ipcvtt_roundps_epi8(W, U, A, S) \
#define _mm512_mask_ipcvtts_roundps_epi8(W, U, A, S) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)(W), (__mmask16)(U), S))
#define _mm512_maskz_ipcvtt_roundps_epi8(U, A, S) \
#define _mm512_maskz_ipcvtts_roundps_epi8(U, A, S) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
S))
#define _mm512_ipcvttps_epu8(A) \
#define _mm512_ipcvtts_ps_epu8(A) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvttps_epu8(W, U, A) \
#define _mm512_mask_ipcvtts_ps_epu8(W, U, A) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask((__v16sf)(__m512h)(A), \
(__v16su)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvttps_epu8(U, A) \
#define _mm512_maskz_ipcvtts_ps_epu8(U, A) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvtt_roundps_epu8(A, S) \
#define _mm512_ipcvtts_roundps_epu8(A, S) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
S))
#define _mm512_mask_ipcvtt_roundps_epu8(W, U, A, S) \
#define _mm512_mask_ipcvtts_roundps_epu8(W, U, A, S) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)(W), (__mmask16)(U), S))
#define _mm512_maskz_ipcvtt_roundps_epu8(U, A, S) \
#define _mm512_maskz_ipcvtts_roundps_epu8(U, A, S) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
S))
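The ipcvt macro family gains an s (saturating) in its names as well. A sketch assuming AVX10.2-512; quantize is a hypothetical wrapper:

#include <immintrin.h>

__m512i quantize(__m512h v) {
  // Saturating fp16 -> signed 8-bit conversion (was _mm512_ipcvtph_epi8).
  return _mm512_ipcvts_ph_epi8(v);
}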


@@ -852,7 +852,7 @@ _mm_maskz_sqrt_pbh(__mmask8 __U, __m128bh __A) {
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
return (__m256bh)__builtin_ia32_vfmaddnepbh256((__v16bf)__A, (__v16bf)__B,
return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
(__v16bf)__C);
}
@@ -880,7 +880,7 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
return (__m256bh)__builtin_ia32_vfmaddnepbh256((__v16bf)__A, (__v16bf)__B,
return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
-(__v16bf)__C);
}
@@ -908,7 +908,7 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fnmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
return (__m256bh)__builtin_ia32_vfmaddnepbh256((__v16bf)__A, -(__v16bf)__B,
return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
(__v16bf)__C);
}
@@ -938,7 +938,7 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fnmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
return (__m256bh)__builtin_ia32_vfmaddnepbh256((__v16bf)__A, -(__v16bf)__B,
return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
-(__v16bf)__C);
}
@@ -969,7 +969,7 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_pbh(
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmadd_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
return (__m128bh)__builtin_ia32_vfmaddnepbh128((__v8bf)__A, (__v8bf)__B,
return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
(__v8bf)__C);
}
@@ -997,7 +997,7 @@ _mm_maskz_fmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmsub_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
return (__m128bh)__builtin_ia32_vfmaddnepbh128((__v8bf)__A, (__v8bf)__B,
return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
-(__v8bf)__C);
}
@@ -1025,7 +1025,7 @@ _mm_maskz_fmsub_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmadd_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
return (__m128bh)__builtin_ia32_vfmaddnepbh128((__v8bf)__A, -(__v8bf)__B,
return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
(__v8bf)__C);
}
@@ -1053,7 +1053,7 @@ _mm_maskz_fnmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmsub_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
return (__m128bh)__builtin_ia32_vfmaddnepbh128((__v8bf)__A, -(__v8bf)__B,
return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
-(__v8bf)__C);
}
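The 128/256-bit BF16 header gets the same builtin rename as the 512-bit file. A sketch of the masked form, assuming AVX10.2 with 128-bit BF16 support; masked_mac is hypothetical:

#include <immintrin.h>

__m128bh masked_mac(__mmask8 m, __m128bh x, __m128bh y, __m128bh acc) {
  // Active lanes get x*y + acc; inactive lanes keep x (the first operand).
  return _mm_mask_fmadd_pbh(x, m, y, acc);
}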

File diff suppressed because it is too large

@@ -66,34 +66,19 @@
(__v2df)_mm_setzero_pd(), (__mmask8)(U)))
#define _mm256_minmax_pd(A, B, C) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
((__m256d)__builtin_ia32_vminmaxpd256_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_setzero_pd(), (__mmask8)-1, _MM_FROUND_NO_EXC))
(__v4df)_mm256_setzero_pd(), (__mmask8)-1))
#define _mm256_mask_minmax_pd(W, U, A, B, C) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
((__m256d)__builtin_ia32_vminmaxpd256_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)(__m256d)(W), (__mmask8)(U), _MM_FROUND_NO_EXC))
(__v4df)(__m256d)(W), (__mmask8)(U)))
#define _mm256_maskz_minmax_pd(U, A, B, C) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
((__m256d)__builtin_ia32_vminmaxpd256_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_setzero_pd(), (__mmask8)(U), _MM_FROUND_NO_EXC))
#define _mm256_minmax_round_pd(A, B, C, R) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_undefined_pd(), (__mmask8)-1, (int)(R)))
#define _mm256_mask_minmax_round_pd(W, U, A, B, C, R) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)(__m256d)(W), (__mmask8)(U), (int)(R)))
#define _mm256_maskz_minmax_round_pd(U, A, B, C, R) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_setzero_pd(), (__mmask8)(U), (int)(R)))
(__v4df)_mm256_setzero_pd(), (__mmask8)(U)))
#define _mm_minmax_ph(A, B, C) \
((__m128h)__builtin_ia32_vminmaxph128_mask( \
@@ -111,34 +96,19 @@
(__v8hf)_mm_setzero_ph(), (__mmask8)(U)))
#define _mm256_minmax_ph(A, B, C) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
((__m256h)__builtin_ia32_vminmaxph256_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)_mm256_setzero_ph(), (__mmask16)-1, _MM_FROUND_NO_EXC))
(__v16hf)_mm256_setzero_ph(), (__mmask16)-1))
#define _mm256_mask_minmax_ph(W, U, A, B, C) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
((__m256h)__builtin_ia32_vminmaxph256_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)(__m256h)(W), (__mmask16)(U), _MM_FROUND_NO_EXC))
(__v16hf)(__m256h)(W), (__mmask16)(U)))
#define _mm256_maskz_minmax_ph(U, A, B, C) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
((__m256h)__builtin_ia32_vminmaxph256_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)_mm256_setzero_ph(), (__mmask16)(U), _MM_FROUND_NO_EXC))
#define _mm256_minmax_round_ph(A, B, C, R) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)_mm256_undefined_ph(), (__mmask16)-1, (int)(R)))
#define _mm256_mask_minmax_round_ph(W, U, A, B, C, R) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (C), \
(__v16hf)(__m256h)(W), (__mmask16)(U), (int)(R)))
#define _mm256_maskz_minmax_round_ph(U, A, B, C, R) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R)))
(__v16hf)_mm256_setzero_ph(), (__mmask16)(U)))
#define _mm_minmax_ps(A, B, C) \
((__m128)__builtin_ia32_vminmaxps128_mask( \
@@ -156,34 +126,19 @@
(__v4sf)_mm_setzero_ps(), (__mmask8)(U)))
#define _mm256_minmax_ps(A, B, C) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
((__m256)__builtin_ia32_vminmaxps256_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_setzero_ps(), (__mmask8)-1, _MM_FROUND_NO_EXC))
(__v8sf)_mm256_setzero_ps(), (__mmask8)-1))
#define _mm256_mask_minmax_ps(W, U, A, B, C) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
((__m256)__builtin_ia32_vminmaxps256_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), (__v8sf)(__m256)(W), \
(__mmask8)(U), _MM_FROUND_NO_EXC))
(__mmask8)(U)))
#define _mm256_maskz_minmax_ps(U, A, B, C) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
((__m256)__builtin_ia32_vminmaxps256_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_setzero_ps(), (__mmask8)(U), _MM_FROUND_NO_EXC))
#define _mm256_minmax_round_ps(A, B, C, R) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_undefined_ps(), (__mmask8)-1, (int)(R)))
#define _mm256_mask_minmax_round_ps(W, U, A, B, C, R) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), (__v8sf)(__m256)(W), \
(__mmask8)(U), (int)(R)))
#define _mm256_maskz_minmax_round_ps(U, A, B, C, R) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_setzero_ps(), (__mmask8)(U), (int)(R)))
(__v8sf)_mm256_setzero_ps(), (__mmask8)(U)))
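The 256-bit minmax operations lose their explicit-rounding variants here; only the forms without an SAE argument remain. A sketch assuming AVX10.2; the imm8 selects the operation per the VMINMAX encoding, and 0 is used below as an assumed minimum selector:

#include <immintrin.h>

__m256d vec_min_like(__m256d a, __m256d b) {
  // The third argument must be a compile-time constant.
  return _mm256_minmax_pd(a, b, 0);
}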
#define _mm_minmax_sd(A, B, C) \
((__m128d)__builtin_ia32_vminmaxsd_round_mask( \

File diff suppressed because it is too large

@@ -71,175 +71,134 @@
#endif /* __x86_64__ */
// 128 Bit : Double -> int
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epi32(__m128d __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtts_pd_epi32(__m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask(
(__v2df)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1)));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttspd_epi32(__m128i __W, __mmask8 __U, __m128d __A) {
_mm_mask_cvtts_pd_epi32(__m128i __W, __mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask((__v2df)__A, (__v4si)__W,
__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttspd_epi32(__mmask16 __U, __m128d __A) {
_mm_maskz_cvtts_pd_epi32(__mmask16 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask(
(__v2df)__A, (__v4si)(__m128i)_mm_setzero_si128(), __U));
}
// 256 Bit : Double -> int
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvttspd_epi32(__m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(
(__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_pd_epi32(__m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_mask(
(__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttspd_epi32(__m128i __W, __mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(
(__v4df)__A, (__v4si)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_pd_epi32(__m128i __W, __mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_mask((__v4df)__A, (__v4si)__W,
__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttspd_epi32(__mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(
(__v4df)__A, (__v4si)_mm_setzero_si128(), __U, _MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_pd_epi32(__mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_mask(
(__v4df)__A, (__v4si)_mm_setzero_si128(), __U));
}
#define _mm256_cvtts_roundpd_epi32(__A, __R) \
((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_undefined_si128(), \
(__mmask8) - 1, (int)(__R)))
#define _mm256_mask_cvtts_roundpd_epi32(__W, __U, __A, __R) \
((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)__W, (__mmask8)__U, (int)(__R)))
#define _mm256_maskz_cvtts_roundpd_epi32(__U, __A, __R) \
((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_setzero_si128(), \
(__mmask8)__U, (int)(__R)))
// 128 Bit : Double -> uint
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epu32(__m128d __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtts_pd_epu32(__m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask(
(__v2df)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1)));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttspd_epu32(__m128i __W, __mmask8 __U, __m128d __A) {
_mm_mask_cvtts_pd_epu32(__m128i __W, __mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask(
(__v2df)__A, (__v4si)(__m128i)__W, (__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttspd_epu32(__mmask8 __U, __m128d __A) {
_mm_maskz_cvtts_pd_epu32(__mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask(
(__v2df)__A, (__v4si)(__m128i)_mm_setzero_si128(), __U));
}
// 256 Bit : Double -> uint
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvttspd_epu32(__m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(
(__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_pd_epu32(__m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_mask(
(__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttspd_epu32(__m128i __W, __mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(
(__v4df)__A, (__v4si)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_pd_epu32(__m128i __W, __mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_mask((__v4df)__A, (__v4si)__W,
__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttspd_epu32(__mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(
(__v4df)__A, (__v4si)_mm_setzero_si128(), __U, _MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_pd_epu32(__mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_mask(
(__v4df)__A, (__v4si)_mm_setzero_si128(), __U));
}
#define _mm256_cvtts_roundpd_epu32(__A, __R) \
((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_undefined_si128(), \
(__mmask8) - 1, (int)(__R)))
#define _mm256_mask_cvtts_roundpd_epu32(__W, __U, __A, __R) \
((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)__W, (__mmask8)__U, (int)(__R)))
#define _mm256_maskz_cvtts_roundpd_epu32(__U, __A, __R) \
((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_setzero_si128(), \
(__mmask8)__U, (int)(__R)))
// 128 Bit : Double -> long
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epi64(__m128d __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtts_pd_epi64(__m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2qqs128_mask(
(__v2df)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttspd_epi64(__m128i __W, __mmask8 __U, __m128d __A) {
_mm_mask_cvtts_pd_epi64(__m128i __W, __mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2qqs128_mask((__v2df)__A, (__v2di)__W,
(__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttspd_epi64(__mmask8 __U, __m128d __A) {
_mm_maskz_cvtts_pd_epi64(__mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2qqs128_mask(
(__v2df)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U));
}
// 256 Bit : Double -> long
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttspd_epi64(__m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask(
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_pd_epi64(__m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_mask(
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttspd_epi64(__m256i __W, __mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask(
(__v4df)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_pd_epi64(__m256i __W, __mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_mask((__v4df)__A, (__v4di)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttspd_epi64(__mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask(
(__v4df)__A, (__v4di)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_pd_epi64(__mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_mask(
(__v4df)__A, (__v4di)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundpd_epi64(__A, __R) \
((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask( \
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \
(int)__R))
#define _mm256_mask_cvtts_roundpd_epi64(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask((__v4df)__A, (__v4di)__W, \
(__mmask8)__U, (int)__R))
#define _mm256_maskz_cvtts_roundpd_epi64(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask( \
(__v4df)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, (int)__R))
// 128 Bit : Double -> ulong
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epu64(__m128d __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtts_pd_epu64(__m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2uqqs128_mask(
(__v2df)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttspd_epu64(__m128i __W, __mmask8 __U, __m128d __A) {
_mm_mask_cvtts_pd_epu64(__m128i __W, __mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2uqqs128_mask((__v2df)__A, (__v2di)__W,
(__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttspd_epu64(__mmask8 __U, __m128d __A) {
_mm_maskz_cvtts_pd_epu64(__mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2uqqs128_mask(
(__v2df)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U));
}
@@ -247,105 +206,74 @@ _mm_maskz_cvttspd_epu64(__mmask8 __U, __m128d __A) {
// 256 Bit : Double -> ulong
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttspd_epu64(__m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask(
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_pd_epu64(__m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_mask(
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttspd_epu64(__m256i __W, __mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask(
(__v4df)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_pd_epu64(__m256i __W, __mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_mask((__v4df)__A, (__v4di)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttspd_epu64(__mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask(
(__v4df)__A, (__v4di)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_pd_epu64(__mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_mask(
(__v4df)__A, (__v4di)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundpd_epu64(__A, __R) \
((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( \
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \
(int)__R))
#define _mm256_mask_cvtts_roundpd_epu64(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( \
(__v4df)__A, (__v4di)__W, (__mmask8)__U, (int)__R))
#define _mm256_maskz_cvtts_roundpd_epu64(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( \
(__v4df)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, (int)__R))
// 128 Bit : float -> int
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epi32(__m128 __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtts_ps_epi32(__m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2dqs128_mask(
(__v4sf)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1)));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttsps_epi32(__m128i __W, __mmask8 __U, __m128 __A) {
_mm_mask_cvtts_ps_epi32(__m128i __W, __mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2dqs128_mask((__v4sf)__A, (__v4si)__W,
(__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttsps_epi32(__mmask8 __U, __m128 __A) {
_mm_maskz_cvtts_ps_epi32(__mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2dqs128_mask(
(__v4sf)__A, (__v4si)(__m128i)_mm_setzero_si128(), (__mmask8)__U));
}
// 256 Bit : float -> int
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttsps_epi32(__m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask(
(__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_ps_epi32(__m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_mask(
(__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttsps_epi32(__m256i __W, __mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask(
(__v8sf)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_ps_epi32(__m256i __W, __mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_mask((__v8sf)__A, (__v8si)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttsps_epi32(__mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask(
(__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_ps_epi32(__mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_mask(
(__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundps_epi32(__A, __R) \
((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_undefined_si256(), \
(__mmask8) - 1, (int)(__R)))
#define _mm256_mask_cvtts_roundps_epi32(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)__W, (__mmask8)__U, (int)(__R)))
#define _mm256_maskz_cvtts_roundps_epi32(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_setzero_si256(), \
(__mmask8)__U, (int)(__R)))
// 128 Bit : float -> uint
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epu32(__m128 __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtts_ps_epu32(__m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2udqs128_mask(
(__v4sf)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1)));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttsps_epu32(__m128i __W, __mmask8 __U, __m128 __A) {
_mm_mask_cvtts_ps_epu32(__m128i __W, __mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2udqs128_mask((__v4sf)__A, (__v4si)__W,
(__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttsps_epu32(__mmask8 __U, __m128 __A) {
_mm_maskz_cvtts_ps_epu32(__mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2udqs128_mask(
(__v4sf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U));
}
@@ -353,144 +281,96 @@ _mm_maskz_cvttsps_epu32(__mmask8 __U, __m128 __A) {
// 256 Bit : float -> uint
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttsps_epu32(__m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask(
(__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_ps_epu32(__m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_mask(
(__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttsps_epu32(__m256i __W, __mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask(
(__v8sf)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_ps_epu32(__m256i __W, __mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_mask((__v8sf)__A, (__v8si)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttsps_epu32(__mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask(
(__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_ps_epu32(__mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_mask(
(__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundps_epu32(__A, __R) \
((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_undefined_si256(), \
(__mmask8) - 1, (int)(__R)))
#define _mm256_mask_cvtts_roundps_epu32(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)__W, (__mmask8)__U, (int)(__R)))
#define _mm256_maskz_cvtts_roundps_epu32(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_setzero_si256(), \
(__mmask8)__U, (int)(__R)))
// 128 bit : float -> long
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epi64(__m128 __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtts_ps_epi64(__m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2qqs128_mask(
(__v4sf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttsps_epi64(__m128i __W, __mmask8 __U, __m128 __A) {
_mm_mask_cvtts_ps_epi64(__m128i __W, __mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2qqs128_mask(
(__v4sf)__A, (__v2di)(__m128i)__W, (__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttsps_epi64(__mmask8 __U, __m128 __A) {
_mm_maskz_cvtts_ps_epi64(__mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2qqs128_mask(
(__v4sf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U));
}
// 256 bit : float -> long
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttsps_epi64(__m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask(
(__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_ps_epi64(__m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_mask(
(__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttsps_epi64(__m256i __W, __mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask(
(__v4sf)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_ps_epi64(__m256i __W, __mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_mask((__v4sf)__A, (__v4di)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttsps_epi64(__mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask(
(__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_ps_epi64(__mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_mask(
(__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundps_epi64(__A, __R) \
((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \
(int)__R))
#define _mm256_mask_cvtts_roundps_epi64(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)__W, (__mmask8)__U, (int)__R))
#define _mm256_maskz_cvtts_roundps_epi64(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, \
(int)__R))
// 128 bit : float -> ulong
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epu64(__m128 __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtts_ps_epu64(__m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2uqqs128_mask(
(__v4sf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttsps_epu64(__m128i __W, __mmask8 __U, __m128 __A) {
_mm_mask_cvtts_ps_epu64(__m128i __W, __mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2uqqs128_mask(
(__v4sf)__A, (__v2di)(__m128i)__W, (__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttsps_epu64(__mmask8 __U, __m128 __A) {
_mm_maskz_cvtts_ps_epu64(__mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2uqqs128_mask(
(__v4sf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U));
}
// 256 bit : float -> ulong
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttsps_epu64(__m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask(
(__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_ps_epu64(__m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_mask(
(__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttsps_epu64(__m256i __W, __mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask(
(__v4sf)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_ps_epu64(__m256i __W, __mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_mask((__v4sf)__A, (__v4di)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttsps_epu64(__mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask(
(__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_ps_epu64(__mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_mask(
(__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundps_epu64(__A, __R) \
((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \
(int)__R))
#define _mm256_mask_cvtts_roundps_epu64(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)__W, (__mmask8)__U, (int)__R))
#define _mm256_maskz_cvtts_roundps_epu64(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, \
(int)__R))
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
#endif // __AVX10_2SATCVTDSINTRIN_H
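/* Illustrative sketch, not part of the diff: one of the renamed saturating
   truncating conversions in use. Assumes a compiler and target with AVX10.2
   support; the wrapper name is hypothetical. */
#include <immintrin.h>

__m128i truncate_doubles_sat(__m128d v) {
  /* Truncates toward zero; out-of-range inputs saturate instead of
     producing the x86 "integer indefinite" value. */
  return _mm_cvtts_pd_epi64(v);
}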


@@ -14,431 +14,299 @@
#ifndef __AVX10_2SATCVTINTRIN_H
#define __AVX10_2SATCVTINTRIN_H
#define _mm_ipcvtbf16_epi8(A) \
#define _mm_ipcvts_bf16_epi8(A) \
((__m128i)__builtin_ia32_vcvtbf162ibs128((__v8bf)(__m128bh)(A)))
#define _mm_mask_ipcvtbf16_epi8(W, U, A) \
#define _mm_mask_ipcvts_bf16_epi8(W, U, A) \
((__m128i)__builtin_ia32_selectw_128( \
(__mmask8)(U), (__v8hi)_mm_ipcvtbf16_epi8(A), (__v8hi)(__m128i)(W)))
(__mmask8)(U), (__v8hi)_mm_ipcvts_bf16_epi8(A), (__v8hi)(__m128i)(W)))
#define _mm_maskz_ipcvtbf16_epi8(U, A) \
#define _mm_maskz_ipcvts_bf16_epi8(U, A) \
((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_ipcvtbf16_epi8(A), \
(__v8hi)_mm_ipcvts_bf16_epi8(A), \
(__v8hi)_mm_setzero_si128()))
#define _mm256_ipcvtbf16_epi8(A) \
#define _mm256_ipcvts_bf16_epi8(A) \
((__m256i)__builtin_ia32_vcvtbf162ibs256((__v16bf)(__m256bh)(A)))
#define _mm256_mask_ipcvtbf16_epi8(W, U, A) \
#define _mm256_mask_ipcvts_bf16_epi8(W, U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvtbf16_epi8(A), \
(__v16hi)_mm256_ipcvts_bf16_epi8(A), \
(__v16hi)(__m256i)(W)))
#define _mm256_maskz_ipcvtbf16_epi8(U, A) \
#define _mm256_maskz_ipcvts_bf16_epi8(U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvtbf16_epi8(A), \
(__v16hi)_mm256_ipcvts_bf16_epi8(A), \
(__v16hi)_mm256_setzero_si256()))
#define _mm_ipcvtbf16_epu8(A) \
#define _mm_ipcvts_bf16_epu8(A) \
((__m128i)__builtin_ia32_vcvtbf162iubs128((__v8bf)(__m128bh)(A)))
#define _mm_mask_ipcvtbf16_epu8(W, U, A) \
#define _mm_mask_ipcvts_bf16_epu8(W, U, A) \
((__m128i)__builtin_ia32_selectw_128( \
(__mmask8)(U), (__v8hi)_mm_ipcvtbf16_epu8(A), (__v8hi)(__m128i)(W)))
(__mmask8)(U), (__v8hi)_mm_ipcvts_bf16_epu8(A), (__v8hi)(__m128i)(W)))
#define _mm_maskz_ipcvtbf16_epu8(U, A) \
#define _mm_maskz_ipcvts_bf16_epu8(U, A) \
((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_ipcvtbf16_epu8(A), \
(__v8hi)_mm_ipcvts_bf16_epu8(A), \
(__v8hi)_mm_setzero_si128()))
#define _mm256_ipcvtbf16_epu8(A) \
#define _mm256_ipcvts_bf16_epu8(A) \
((__m256i)__builtin_ia32_vcvtbf162iubs256((__v16bf)(__m256bh)(A)))
#define _mm256_mask_ipcvtbf16_epu8(W, U, A) \
#define _mm256_mask_ipcvts_bf16_epu8(W, U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvtbf16_epu8(A), \
(__v16hi)_mm256_ipcvts_bf16_epu8(A), \
(__v16hi)(__m256i)(W)))
#define _mm256_maskz_ipcvtbf16_epu8(U, A) \
#define _mm256_maskz_ipcvts_bf16_epu8(U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvtbf16_epu8(A), \
(__v16hi)_mm256_ipcvts_bf16_epu8(A), \
(__v16hi)_mm256_setzero_si256()))
#define _mm_ipcvtph_epi8(A) \
#define _mm_ipcvts_ph_epi8(A) \
((__m128i)__builtin_ia32_vcvtph2ibs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvtph_epi8(W, U, A) \
#define _mm_mask_ipcvts_ph_epi8(W, U, A) \
((__m128i)__builtin_ia32_vcvtph2ibs128_mask((__v8hf)(__m128h)(A), \
(__v8hu)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvtph_epi8(U, A) \
#define _mm_maskz_ipcvts_ph_epi8(U, A) \
((__m128i)__builtin_ia32_vcvtph2ibs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvtph_epi8(A) \
#define _mm256_ipcvts_ph_epi8(A) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1))
#define _mm256_mask_ipcvtph_epi8(W, U, A) \
#define _mm256_mask_ipcvts_ph_epi8(W, U, A) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v16hu)(W), (__mmask16)(U)))
#define _mm256_maskz_ipcvtph_epi8(U, A) \
#define _mm256_maskz_ipcvts_ph_epi8(U, A) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
(__mmask16)(U)))
#define _mm256_ipcvt_roundph_epi8(A, R) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)_mm256_setzero_si256(), \
(__mmask16)-1, (const int)R))
#define _mm256_mask_ipcvt_roundph_epi8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
#define _mm256_maskz_ipcvt_roundph_epi8(U, A, R) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)_mm256_setzero_si256(), \
(__mmask16)(U), (const int)R))
#define _mm_ipcvtph_epu8(A) \
#define _mm_ipcvts_ph_epu8(A) \
((__m128i)__builtin_ia32_vcvtph2iubs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvtph_epu8(W, U, A) \
#define _mm_mask_ipcvts_ph_epu8(W, U, A) \
((__m128i)__builtin_ia32_vcvtph2iubs128_mask((__v8hf)(__m128h)(A), \
(__v8hu)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvtph_epu8(U, A) \
#define _mm_maskz_ipcvts_ph_epu8(U, A) \
((__m128i)__builtin_ia32_vcvtph2iubs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvtph_epu8(A) \
#define _mm256_ipcvts_ph_epu8(A) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1))
#define _mm256_mask_ipcvtph_epu8(W, U, A) \
#define _mm256_mask_ipcvts_ph_epu8(W, U, A) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v16hu)(W), (__mmask16)(U)))
#define _mm256_maskz_ipcvtph_epu8(U, A) \
#define _mm256_maskz_ipcvts_ph_epu8(U, A) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
(__mmask16)(U)))
#define _mm256_ipcvt_roundph_epu8(A, R) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
(const int)R))
#define _mm256_mask_ipcvt_roundph_epu8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
#define _mm256_maskz_ipcvt_roundph_epu8(U, A, R) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)(U), \
(const int)R))
#define _mm_ipcvtps_epi8(A) \
#define _mm_ipcvts_ps_epi8(A) \
((__m128i)__builtin_ia32_vcvtps2ibs128_mask( \
(__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvtps_epi8(W, U, A) \
#define _mm_mask_ipcvts_ps_epi8(W, U, A) \
((__m128i)__builtin_ia32_vcvtps2ibs128_mask((__v4sf)(__m128)(A), \
(__v4su)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvtps_epi8(U, A) \
#define _mm_maskz_ipcvts_ps_epi8(U, A) \
((__m128i)__builtin_ia32_vcvtps2ibs128_mask( \
(__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvtps_epi8(A) \
#define _mm256_ipcvts_ps_epi8(A) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_ipcvtps_epi8(W, U, A) \
#define _mm256_mask_ipcvts_ps_epi8(W, U, A) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)(W), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8su)(W), (__mmask8)(U)))
#define _mm256_maskz_ipcvtps_epi8(U, A) \
#define _mm256_maskz_ipcvts_ps_epi8(U, A) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U)))
#define _mm256_ipcvt_roundps_epi8(A, R) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)-1, (const int)R))
#define _mm256_mask_ipcvt_roundps_epi8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
#define _mm256_maskz_ipcvt_roundps_epi8(U, A, R) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)(U), (const int)R))
#define _mm_ipcvtps_epu8(A) \
#define _mm_ipcvts_ps_epu8(A) \
((__m128i)__builtin_ia32_vcvtps2iubs128_mask( \
(__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvtps_epu8(W, U, A) \
#define _mm_mask_ipcvts_ps_epu8(W, U, A) \
((__m128i)__builtin_ia32_vcvtps2iubs128_mask((__v4sf)(__m128)(A), \
(__v4su)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvtps_epu8(U, A) \
#define _mm_maskz_ipcvts_ps_epu8(U, A) \
((__m128i)__builtin_ia32_vcvtps2iubs128_mask( \
(__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvtps_epu8(A) \
#define _mm256_ipcvts_ps_epu8(A) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_ipcvtps_epu8(W, U, A) \
#define _mm256_mask_ipcvts_ps_epu8(W, U, A) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask((__v8sf)(__m256)(A), \
(__v8su)(W), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8su)(W), (__mmask8)(U)))
#define _mm256_maskz_ipcvtps_epu8(U, A) \
#define _mm256_maskz_ipcvts_ps_epu8(U, A) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U)))
#define _mm256_ipcvt_roundps_epu8(A, R) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)-1, (const int)R))
#define _mm256_mask_ipcvt_roundps_epu8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
#define _mm256_maskz_ipcvt_roundps_epu8(U, A, R) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)(U), (const int)R))
#define _mm_ipcvttbf16_epi8(A) \
#define _mm_ipcvtts_bf16_epi8(A) \
((__m128i)__builtin_ia32_vcvttbf162ibs128((__v8bf)(__m128bh)(A)))
#define _mm_mask_ipcvttbf16_epi8(W, U, A) \
#define _mm_mask_ipcvtts_bf16_epi8(W, U, A) \
((__m128i)__builtin_ia32_selectw_128( \
(__mmask8)(U), (__v8hi)_mm_ipcvttbf16_epi8(A), (__v8hi)(__m128i)(W)))
(__mmask8)(U), (__v8hi)_mm_ipcvtts_bf16_epi8(A), (__v8hi)(__m128i)(W)))
#define _mm_maskz_ipcvttbf16_epi8(U, A) \
#define _mm_maskz_ipcvtts_bf16_epi8(U, A) \
((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_ipcvttbf16_epi8(A), \
(__v8hi)_mm_ipcvtts_bf16_epi8(A), \
(__v8hi)_mm_setzero_si128()))
#define _mm256_ipcvttbf16_epi8(A) \
#define _mm256_ipcvtts_bf16_epi8(A) \
((__m256i)__builtin_ia32_vcvttbf162ibs256((__v16bf)(__m256bh)(A)))
#define _mm256_mask_ipcvttbf16_epi8(W, U, A) \
#define _mm256_mask_ipcvtts_bf16_epi8(W, U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvttbf16_epi8(A), \
(__v16hi)_mm256_ipcvtts_bf16_epi8(A), \
(__v16hi)(__m256i)(W)))
#define _mm256_maskz_ipcvttbf16_epi8(U, A) \
#define _mm256_maskz_ipcvtts_bf16_epi8(U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvttbf16_epi8(A), \
(__v16hi)_mm256_ipcvtts_bf16_epi8(A), \
(__v16hi)_mm256_setzero_si256()))
#define _mm_ipcvttbf16_epu8(A) \
#define _mm_ipcvtts_bf16_epu8(A) \
((__m128i)__builtin_ia32_vcvttbf162iubs128((__v8bf)(__m128bh)(A)))
#define _mm_mask_ipcvttbf16_epu8(W, U, A) \
#define _mm_mask_ipcvtts_bf16_epu8(W, U, A) \
((__m128i)__builtin_ia32_selectw_128( \
(__mmask8)(U), (__v8hi)_mm_ipcvttbf16_epu8(A), (__v8hi)(__m128i)(W)))
(__mmask8)(U), (__v8hi)_mm_ipcvtts_bf16_epu8(A), (__v8hi)(__m128i)(W)))
#define _mm_maskz_ipcvttbf16_epu8(U, A) \
#define _mm_maskz_ipcvtts_bf16_epu8(U, A) \
((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_ipcvttbf16_epu8(A), \
(__v8hi)_mm_ipcvtts_bf16_epu8(A), \
(__v8hi)_mm_setzero_si128()))
#define _mm256_ipcvttbf16_epu8(A) \
#define _mm256_ipcvtts_bf16_epu8(A) \
((__m256i)__builtin_ia32_vcvttbf162iubs256((__v16bf)(__m256bh)(A)))
#define _mm256_mask_ipcvttbf16_epu8(W, U, A) \
#define _mm256_mask_ipcvtts_bf16_epu8(W, U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvttbf16_epu8(A), \
(__v16hi)_mm256_ipcvtts_bf16_epu8(A), \
(__v16hi)(__m256i)(W)))
#define _mm256_maskz_ipcvttbf16_epu8(U, A) \
#define _mm256_maskz_ipcvtts_bf16_epu8(U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvttbf16_epu8(A), \
(__v16hi)_mm256_ipcvtts_bf16_epu8(A), \
(__v16hi)_mm256_setzero_si256()))
#define _mm_ipcvttph_epi8(A) \
#define _mm_ipcvtts_ph_epi8(A) \
((__m128i)__builtin_ia32_vcvttph2ibs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvttph_epi8(W, U, A) \
#define _mm_mask_ipcvtts_ph_epi8(W, U, A) \
((__m128i)__builtin_ia32_vcvttph2ibs128_mask((__v8hf)(__m128h)(A), \
(__v8hu)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvttph_epi8(U, A) \
#define _mm_maskz_ipcvtts_ph_epi8(U, A) \
((__m128i)__builtin_ia32_vcvttph2ibs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvttph_epi8(A) \
#define _mm256_ipcvtts_ph_epi8(A) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1))
#define _mm256_mask_ipcvttph_epi8(W, U, A) \
#define _mm256_mask_ipcvtts_ph_epi8(W, U, A) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v16hu)(W), (__mmask16)(U)))
#define _mm256_maskz_ipcvttph_epi8(U, A) \
#define _mm256_maskz_ipcvtts_ph_epi8(U, A) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
(__mmask16)(U)))
#define _mm256_ipcvtt_roundph_epi8(A, R) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
(const int)R))
#define _mm256_mask_ipcvtt_roundph_epi8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
#define _mm256_maskz_ipcvtt_roundph_epi8(U, A, R) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)(U), \
(const int)R))
#define _mm_ipcvttph_epu8(A) \
#define _mm_ipcvtts_ph_epu8(A) \
((__m128i)__builtin_ia32_vcvttph2iubs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvttph_epu8(W, U, A) \
#define _mm_mask_ipcvtts_ph_epu8(W, U, A) \
((__m128i)__builtin_ia32_vcvttph2iubs128_mask((__v8hf)(__m128h)(A), \
(__v8hu)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvttph_epu8(U, A) \
#define _mm_maskz_ipcvtts_ph_epu8(U, A) \
((__m128i)__builtin_ia32_vcvttph2iubs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvttph_epu8(A) \
#define _mm256_ipcvtts_ph_epu8(A) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1))
#define _mm256_mask_ipcvttph_epu8(W, U, A) \
#define _mm256_mask_ipcvtts_ph_epu8(W, U, A) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v16hu)(W), (__mmask16)(U)))
#define _mm256_maskz_ipcvttph_epu8(U, A) \
#define _mm256_maskz_ipcvtts_ph_epu8(U, A) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
(__mmask16)(U)))
#define _mm256_ipcvtt_roundph_epu8(A, R) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
(const int)R))
#define _mm256_mask_ipcvtt_roundph_epu8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
#define _mm256_maskz_ipcvtt_roundph_epu8(U, A, R) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)(U), \
(const int)R))
#define _mm_ipcvttps_epi8(A) \
#define _mm_ipcvtts_ps_epi8(A) \
((__m128i)__builtin_ia32_vcvttps2ibs128_mask( \
(__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvttps_epi8(W, U, A) \
#define _mm_mask_ipcvtts_ps_epi8(W, U, A) \
((__m128i)__builtin_ia32_vcvttps2ibs128_mask((__v4sf)(__m128)(A), \
(__v4su)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvttps_epi8(U, A) \
#define _mm_maskz_ipcvtts_ps_epi8(U, A) \
((__m128i)__builtin_ia32_vcvttps2ibs128_mask( \
(__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvttps_epi8(A) \
#define _mm256_ipcvtts_ps_epi8(A) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_ipcvttps_epi8(W, U, A) \
#define _mm256_mask_ipcvtts_ps_epi8(W, U, A) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)(W), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8su)(W), (__mmask8)(U)))
#define _mm256_maskz_ipcvttps_epi8(U, A) \
#define _mm256_maskz_ipcvtts_ps_epi8(U, A) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U)))
#define _mm256_ipcvtt_roundps_epi8(A, R) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)-1, (const int)R))
#define _mm256_mask_ipcvtt_roundps_epi8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
#define _mm256_maskz_ipcvtt_roundps_epi8(U, A, R) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)(U), (const int)R))
#define _mm_ipcvttps_epu8(A) \
#define _mm_ipcvtts_ps_epu8(A) \
((__m128i)__builtin_ia32_vcvttps2iubs128_mask( \
(__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvttps_epu8(W, U, A) \
#define _mm_mask_ipcvtts_ps_epu8(W, U, A) \
((__m128i)__builtin_ia32_vcvttps2iubs128_mask((__v4sf)(__m128)(A), \
(__v4su)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvttps_epu8(U, A) \
#define _mm_maskz_ipcvtts_ps_epu8(U, A) \
((__m128i)__builtin_ia32_vcvttps2iubs128_mask( \
(__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvttps_epu8(A) \
#define _mm256_ipcvtts_ps_epu8(A) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_ipcvttps_epu8(W, U, A) \
#define _mm256_mask_ipcvtts_ps_epu8(W, U, A) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask((__v8sf)(__m256)(A), \
(__v8su)(W), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8su)(W), (__mmask8)(U)))
#define _mm256_maskz_ipcvttps_epu8(U, A) \
#define _mm256_maskz_ipcvtts_ps_epu8(U, A) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm256_ipcvtt_roundps_epu8(A, R) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
(const int)R))
#define _mm256_mask_ipcvtt_roundps_epu8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
#define _mm256_maskz_ipcvtt_roundps_epu8(U, A, R) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)(U), \
(const int)R))
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U)))
#endif // __AVX10_2SATCVTINTRIN_H
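/* Illustrative sketch, not part of the diff: one of the renamed ipcvts
   macros in use. Assumes AVX10.2 support; the wrapper name is hypothetical. */
#include <immintrin.h>

__m128i halves_to_saturated_bytes(__m128h v) {
  /* Converts 8 fp16 lanes to signed 8-bit results (one per 16-bit lane),
     saturating on overflow. */
  return _mm_ipcvts_ph_epi8(v);
}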


@@ -553,7 +553,8 @@ static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_abs_ph(__m512h __A) {
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_conj_pch(__m512h __A) {
return (__m512h)_mm512_xor_ps((__m512)__A, _mm512_set1_ps(-0.0f));
return (__m512h)_mm512_xor_epi32((__m512i)__A,
_mm512_set1_epi32(-2147483648));
}
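/* Context for the change above (a scalar model, not upstream code): each
   32-bit lane of the __m512h holds one fp16 complex value with the imaginary
   half in the upper 16 bits, and -2147483648 is the bit pattern 0x80000000. */
static unsigned conj_lane(unsigned lane) {
  return lane ^ 0x80000000u; /* flip the sign bit of the imaginary half */
}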
static __inline__ __m512h __DEFAULT_FN_ATTRS512


@@ -161,8 +161,6 @@ _mm_tzcnt_64(unsigned long long __X) {
#undef __RELAXED_FN_ATTRS
#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI__)
/* Define the default attributes for the functions in this file. */
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS \
@@ -603,6 +601,4 @@ __blsr_u64(unsigned long long __X) {
#undef __DEFAULT_FN_ATTRS
#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__BMI__) */
#endif /* __BMIINTRIN_H */

lib/include/cpuid.h vendored

@@ -268,16 +268,16 @@
#else
/* x86-64 uses %rbx as the base register, so preserve it. */
#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
__asm(" xchgq %%rbx,%q1\n" \
__asm(" xchg{q|} {%%|}rbx,%q1\n" \
" cpuid\n" \
" xchgq %%rbx,%q1" \
" xchg{q|} {%%|}rbx,%q1" \
: "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__leaf))
#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
__asm(" xchgq %%rbx,%q1\n" \
__asm(" xchg{q|} {%%|}rbx,%q1\n" \
" cpuid\n" \
" xchgq %%rbx,%q1" \
" xchg{q|} {%%|}rbx,%q1" \
: "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__leaf), "2"(__count))
#endif
@@ -289,20 +289,22 @@ static __inline unsigned int __get_cpuid_max (unsigned int __leaf,
#ifdef __i386__
int __cpuid_supported;
__asm(" pushfl\n"
" popl %%eax\n"
" movl %%eax,%%ecx\n"
" xorl $0x00200000,%%eax\n"
" pushl %%eax\n"
" popfl\n"
" pushfl\n"
" popl %%eax\n"
" movl $0,%0\n"
" cmpl %%eax,%%ecx\n"
__asm(" pushf{l|d}\n"
" pop{l|} {%%|}eax\n"
" mov{l|} {%%eax,%%ecx|ecx,eax}\n"
" xor{l|} {$0x00200000,%%eax|eax,0x00200000}\n"
" push{l|} {%%|}eax\n"
" popf{l|d}\n"
" pushf{l|d}\n"
" pop{l|} {%%|}eax\n"
" mov{l|} {$0,%0|%0,0}\n"
" cmp{l|} {%%eax,%%ecx|ecx,eax}\n"
" je 1f\n"
" movl $1,%0\n"
" mov{l|} {$1,%0|%0,1}\n"
"1:"
: "=r" (__cpuid_supported) : : "eax", "ecx");
: "=r"(__cpuid_supported)
:
: "eax", "ecx");
if (!__cpuid_supported)
return 0;
#endif
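/* Illustrative caller, not part of the diff: the i386 detection above is
   what makes __get_cpuid return 0 on pre-CPUID hardware. */
#include <cpuid.h>
#include <stdio.h>

int main(void) {
  unsigned eax, ebx, ecx, edx;
  if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    printf("SSE2: %s\n", (edx & (1u << 26)) ? "yes" : "no");
  return 0;
}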

lib/include/float.h vendored

@@ -18,21 +18,12 @@
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
*
* Also fall back on Darwin and AIX to allow additional definitions and
* Also fall back on AIX to allow additional definitions and
* implementation-defined values.
*/
#if (defined(__APPLE__) || defined(__MINGW32__) || defined(_MSC_VER) || \
defined(_AIX)) && \
#if (defined(__MINGW32__) || defined(_MSC_VER) || defined(_AIX)) && \
__STDC_HOSTED__ && __has_include_next(<float.h>)
/* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level
* of #include_next<float.h> to keep Metrowerks compilers happy. Avoid this
* extra indirection.
*/
#ifdef __APPLE__
#define _FLOAT_H_
#endif
# include_next <float.h>
/* Undefine anything that we'll be redefining below. */


@@ -16,231 +16,112 @@
#include <x86gprintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__MMX__)
#include <mmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE__)
#include <xmmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE2__)
#include <emmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE3__)
#include <pmmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSSE3__)
#include <tmmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__SSE4_2__) || defined(__SSE4_1__))
#include <smmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AES__) || defined(__PCLMUL__))
#include <wmmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CLFLUSHOPT__)
#include <clflushoptintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CLWB__)
#include <clwbintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX__)
#include <avxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX2__)
#include <avx2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__F16C__)
#include <f16cintrin.h>
#endif
/* No feature check desired due to internal checks */
#include <bmiintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI2__)
#include <bmi2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__LZCNT__)
#include <lzcntintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__POPCNT__)
#include <popcntintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA__)
#include <fmaintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512F__)
#include <avx512fintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VL__)
#include <avx512vlintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BW__)
#include <avx512bwintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BITALG__)
#include <avx512bitalgintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512CD__)
#include <avx512cdintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__)
#include <avx512vpopcntdqintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
#include <avx512vpopcntdqvlintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VNNI__)
#include <avx512vnniintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VNNI__))
#include <avx512vlvnniintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNI__)
#include <avxvnniintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512DQ__)
#include <avx512dqintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BITALG__))
#include <avx512vlbitalgintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BW__))
#include <avx512vlbwintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512CD__))
#include <avx512vlcdintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512DQ__))
#include <avx512vldqintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512IFMA__)
#include <avx512ifmaintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512IFMA__) && defined(__AVX512VL__))
#include <avx512ifmavlintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXIFMA__)
#include <avxifmaintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI__)
#include <avx512vbmiintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VBMI__) && defined(__AVX512VL__))
#include <avx512vbmivlintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI2__)
#include <avx512vbmi2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VBMI2__) && defined(__AVX512VL__))
#include <avx512vlvbmi2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512FP16__)
#include <avx512fp16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512FP16__))
#include <avx512vlfp16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BF16__)
#include <avx512bf16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BF16__))
#include <avx512vlbf16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__PKU__)
#include <pkuintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__VPCLMULQDQ__)
#include <vpclmulqdqintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__VAES__)
#include <vaesintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__GFNI__)
#include <gfniintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT8__)
#include <avxvnniint8intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXNECONVERT__)
#include <avxneconvertintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA512__)
#include <sha512intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SM3__)
#include <sm3intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SM4__)
#include <sm4intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT16__)
#include <avxvnniint16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPID__)
/// Reads the value of the IA32_TSC_AUX MSR (0xc0000103).
///
/// \headerfile <immintrin.h>
@@ -252,9 +133,7 @@ static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __
_rdpid_u32(void) {
return __builtin_ia32_rdpid();
}
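/* Illustrative sketch, not part of the diff: RDPID reads IA32_TSC_AUX, which
   operating systems typically initialize to encode the current processor
   number. Assumes -mrdpid; the wrapper name is hypothetical. */
#include <immintrin.h>

unsigned read_tsc_aux(void) {
  return _rdpid_u32();
}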
#endif // __RDPID__
#if !defined(__SCE__) || __has_feature(modules) || defined(__RDRND__)
/// Returns a 16-bit hardware-generated random value.
///
/// \headerfile <immintrin.h>
@@ -314,9 +193,7 @@ _rdrand64_step(unsigned long long *__p)
}
#endif
}
#endif /* __RDRND__ */
#if !defined(__SCE__) || __has_feature(modules) || defined(__FSGSBASE__)
#ifdef __x86_64__
/// Reads the FS base register.
///
@@ -427,9 +304,6 @@ _writegsbase_u64(unsigned long long __V)
}
#endif
#endif /* __FSGSBASE__ */
#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVBE__)
/* The structs used below are to force the load/store to be unaligned. This
* is accomplished with the __packed__ attribute. The __may_alias__ prevents
@@ -543,172 +417,86 @@ _storebe_i64(void * __P, long long __D) {
((struct __storeu_i64*)__P)->__v = __builtin_bswap64((unsigned long long)__D);
}
#endif
#endif /* __MOVBE */
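/* Illustrative sketch, not part of the diff: the unaligned big-endian
   load/store helpers described above. Assumes -mmovbe; the function names
   are hypothetical. */
#include <immintrin.h>

int read_be32(const void *p) { return _loadbe_i32(p); }
void write_be32(void *p, int v) { _storebe_i32(p, v); }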
#if !defined(__SCE__) || __has_feature(modules) || defined(__RTM__)
#include <rtmintrin.h>
#include <xtestintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA__)
#include <shaintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__FXSR__)
#include <fxsrintrin.h>
#endif
/* No feature check desired due to internal MSC_VER checks */
#include <xsaveintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEOPT__)
#include <xsaveoptintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEC__)
#include <xsavecintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVES__)
#include <xsavesintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SHSTK__)
#include <cetintrin.h>
#endif
/* Intrinsics inside adcintrin.h are available at all times. */
#include <adcintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__ADX__)
#include <adxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__RDSEED__)
#include <rdseedintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__WBNOINVD__)
#include <wbnoinvdintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CLDEMOTE__)
#include <cldemoteintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__WAITPKG__)
#include <waitpkgintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVDIRI__) || \
defined(__MOVDIR64B__)
#include <movdirintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVRS__)
#include <movrsintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX10_2__) && defined(__MOVRS__))
#include <movrs_avx10_2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX10_2_512__) && defined(__MOVRS__))
#include <movrs_avx10_2_512intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__PCONFIG__)
#include <pconfigintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SGX__)
#include <sgxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__PTWRITE__)
#include <ptwriteintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__INVPCID__)
#include <invpcidintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__) || \
defined(__WIDEKL__)
#include <keylockerintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TILE__) || \
defined(__AMX_INT8__) || defined(__AMX_BF16__)
#include <amxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_FP16__)
#include <amxfp16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_COMPLEX__)
#include <amxcomplexintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_FP8__)
#include <amxfp8intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TRANSPOSE__)
#include <amxtransposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_MOVRS__)
#include <amxmovrsintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_MOVRS__) && defined(__AMX_TRANSPOSE__))
#include <amxmovrstransposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_AVX512__)
#include <amxavx512intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TF32__)
#include <amxtf32intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_TF32__) && defined(__AMX_TRANSPOSE__))
#include <amxtf32transposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_BF16__) && defined(__AMX_TRANSPOSE__))
#include <amxbf16transposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_FP16__) && defined(__AMX_TRANSPOSE__))
#include <amxfp16transposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_COMPLEX__) && defined(__AMX_TRANSPOSE__))
#include <amxcomplextransposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
defined(__AVX512VP2INTERSECT__)
#include <avx512vp2intersectintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
#include <avx512vlvp2intersectintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX10_2__)
#include <avx10_2bf16intrin.h>
#include <avx10_2convertintrin.h>
#include <avx10_2copyintrin.h>
@@ -716,33 +504,21 @@ _storebe_i64(void * __P, long long __D) {
#include <avx10_2niintrin.h>
#include <avx10_2satcvtdsintrin.h>
#include <avx10_2satcvtintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX10_2_512__)
#include <avx10_2_512bf16intrin.h>
#include <avx10_2_512convertintrin.h>
#include <avx10_2_512minmaxintrin.h>
#include <avx10_2_512niintrin.h>
#include <avx10_2_512satcvtdsintrin.h>
#include <avx10_2_512satcvtintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX10_2_512__) && defined(__SM4__))
#include <sm4evexintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__ENQCMD__)
#include <enqcmdintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SERIALIZE__)
#include <serializeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__TSXLDTRK__)
#include <tsxldtrkintrin.h>
#endif
#if defined(_MSC_VER) && __has_extension(gnu_asm)
/* Define the default attributes for these intrinsics */

lib/include/intrin.h vendored

@@ -162,8 +162,6 @@ void _Store_HLERelease(long volatile *, long);
void _Store64_HLERelease(__int64 volatile *, __int64);
void _StorePointer_HLERelease(void *volatile *, void *);
void _WriteBarrier(void);
unsigned __int32 _xbegin(void);
void _xend(void);
/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
#if defined(__x86_64__) && !defined(__arm64ec__)
@@ -372,10 +370,29 @@ static __inline__ void __DEFAULT_FN_ATTRS __nop(void) {
\*----------------------------------------------------------------------------*/
#if defined(__aarch64__) || defined(__arm64ec__)
unsigned __int64 __getReg(int);
long _InterlockedAdd(long volatile *Addend, long Value);
__int64 _InterlockedAdd64(__int64 volatile *Addend, __int64 Value);
unsigned char _interlockedbittestandreset_acq(long volatile *, long);
unsigned char _interlockedbittestandreset_nf(long volatile *, long);
unsigned char _interlockedbittestandreset_rel(long volatile *, long);
unsigned char _interlockedbittestandreset64_acq(__int64 volatile *, __int64);
unsigned char _interlockedbittestandreset64_nf(__int64 volatile *, __int64);
unsigned char _interlockedbittestandreset64_rel(__int64 volatile *, __int64);
unsigned char _interlockedbittestandset_acq(long volatile *, long);
unsigned char _interlockedbittestandset_nf(long volatile *, long);
unsigned char _interlockedbittestandset_rel(long volatile *, long);
unsigned char _interlockedbittestandset64_acq(__int64 volatile *, __int64);
unsigned char _interlockedbittestandset64_nf(__int64 volatile *, __int64);
unsigned char _interlockedbittestandset64_rel(__int64 volatile *, __int64);
long _InterlockedAdd(long volatile *, long);
long _InterlockedAdd_acq(long volatile *, long);
long _InterlockedAdd_nf(long volatile *, long);
long _InterlockedAdd_rel(long volatile *, long);
__int64 _InterlockedAdd64(__int64 volatile *, __int64);
__int64 _InterlockedAdd64_acq(__int64 volatile *, __int64);
__int64 _InterlockedAdd64_nf(__int64 volatile *, __int64);
__int64 _InterlockedAdd64_rel(__int64 volatile *, __int64);
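/* Illustrative sketch, not part of the diff: unlike _InterlockedExchangeAdd,
   the _InterlockedAdd family returns the *new* value; the _acq/_rel/_nf
   suffixes select the memory ordering on ARM64. The helper is hypothetical. */
static long bump(long volatile *counter) {
  return _InterlockedAdd_acq(counter, 1); /* acquire-ordered increment */
}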
__int64 _ReadStatusReg(int);
void _WriteStatusReg(int, __int64);
unsigned int __sys(int, __int64);
unsigned short __cdecl _byteswap_ushort(unsigned short val);
unsigned long __cdecl _byteswap_ulong (unsigned long val);


@@ -28,8 +28,6 @@
#ifndef _KEYLOCKERINTRIN_H
#define _KEYLOCKERINTRIN_H
#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("kl"),\
@@ -326,10 +324,6 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
#undef __DEFAULT_FN_ATTRS
#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__KL__) */
#if !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("kl,widekl"),\
@@ -521,7 +515,4 @@ _mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
#undef __DEFAULT_FN_ATTRS
#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) \
*/
#endif /* _KEYLOCKERINTRIN_H */


@@ -25,7 +25,7 @@
// The LLVM C library uses these named types so we forward declare them.
typedef void (*__atexithandler_t)(void);
typedef int (*__bsearchcompare_t)(const void *, const void *);
typedef int (*__search_compare_t)(const void *, const void *);
typedef int (*__qsortcompare_t)(const void *, const void *);
typedef int (*__qsortrcompare_t)(const void *, const void *, void *);


@@ -14,13 +14,15 @@
#ifndef __LZCNTINTRIN_H
#define __LZCNTINTRIN_H
/* Define the default attributes for the functions in this file. */
/* Define the default attributes for the functions in this file.
   Allow using the lzcnt intrinsics even for non-LZCNT targets: these
   intrinsics are mapped to llvm.ctlz(*, false), which can be lowered to BSR
   on non-LZCNT targets while still handling a zero-value input correctly. */
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("lzcnt"))) constexpr
__attribute__((__always_inline__, __nodebug__)) constexpr
#else
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
#endif
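/* Illustrative sketch, not part of the diff: because the intrinsics lower to
   llvm.ctlz(x, false), _lzcnt_u32(0) is a well-defined 32 even when the
   backend selects BSR. The helper name is hypothetical. */
#include <immintrin.h>

unsigned floor_log2(unsigned x) {
  return 31 - _lzcnt_u32(x | 1); /* x|1 keeps the input nonzero */
}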
#ifndef _MSC_VER


@@ -35,6 +35,14 @@ module _Builtin_intrinsics [system] [extern_c] {
}
}
explicit module arm64 {
requires arm64
requires windows
header "arm64intr.h"
export *
}
explicit module intel {
requires x86
export *
@@ -231,6 +239,11 @@ module _Builtin_stdbool [system] {
export *
}
module _Builtin_stdcountof [system] {
header "stdcountof.h"
export *
}
module _Builtin_stddef [system] {
textual header "stddef.h"


@@ -14,6 +14,10 @@
#ifndef __PRFCHWINTRIN_H
#define __PRFCHWINTRIN_H
#if defined(__cplusplus)
extern "C" {
#endif
/// Loads a memory sequence containing the specified memory address into
/// all data cache levels.
///
@@ -26,11 +30,7 @@
///
/// \param __P
/// A pointer specifying the memory address to be prefetched.
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_m_prefetch(void *__P)
{
__builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
}
void _m_prefetch(void *__P);
/// Loads a memory sequence containing the specified memory address into
/// the L1 data cache and sets the cache-coherency state to modified.
@@ -48,13 +48,10 @@ _m_prefetch(void *__P)
///
/// \param __P
/// A pointer specifying the memory address to be prefetched.
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_m_prefetchw(volatile const void *__P)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
__builtin_prefetch ((const void*)__P, 1, 3 /* _MM_HINT_T0 */);
#pragma clang diagnostic pop
}
void _m_prefetchw(volatile const void *__P);
#if defined(__cplusplus)
} // extern "C"
#endif
#endif /* __PRFCHWINTRIN_H */
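/* Illustrative sketch, not part of the diff: prefetch a cache line for
   writing before modifying it. Assumes a PRFCHW-capable target; the struct
   and function are hypothetical. */
#include <x86intrin.h>

struct node { struct node *next; long value; };

void prepare_update(volatile const struct node *n) {
  _m_prefetchw(n); /* load into L1 and mark the line modified */
}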

lib/include/ptrauth.h vendored

@@ -42,6 +42,19 @@ typedef enum {
The extra data is always 0. */
ptrauth_key_cxx_vtable_pointer = ptrauth_key_process_independent_data,
/* The key used to sign metadata pointers to Objective-C method-lists. */
ptrauth_key_method_list_pointer = ptrauth_key_asda,
/* The key used to sign Objective-C isa and super pointers. */
ptrauth_key_objc_isa_pointer = ptrauth_key_process_independent_data,
ptrauth_key_objc_super_pointer = ptrauth_key_process_independent_data,
/* The key used to sign selector pointers */
ptrauth_key_objc_sel_pointer = ptrauth_key_process_dependent_data,
/* The key used to sign Objective-C class_ro_t pointers. */
ptrauth_key_objc_class_ro_pointer = ptrauth_key_process_independent_data,
/* The key used to sign pointers in ELF .init_array/.fini_array. */
ptrauth_key_init_fini_pointer = ptrauth_key_process_independent_code,
@@ -259,6 +272,46 @@ typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t;
/* The value is ptrauth_string_discriminator("init_fini") */
#define __ptrauth_init_fini_discriminator 0xd9d4
/* Objective-C pointer auth ABI qualifiers */
#define __ptrauth_objc_method_list_imp \
__ptrauth(ptrauth_key_function_pointer, 1, 0)
#if __has_feature(ptrauth_objc_method_list_pointer)
#define __ptrauth_objc_method_list_pointer \
__ptrauth(ptrauth_key_method_list_pointer, 1, 0xC310)
#else
#define __ptrauth_objc_method_list_pointer
#endif
#define __ptrauth_isa_discriminator 0x6AE1
#define __ptrauth_super_discriminator 0xB5AB
#define __ptrauth_objc_isa_pointer \
__ptrauth(ptrauth_key_objc_isa_pointer, 1, __ptrauth_isa_discriminator)
#if __has_feature(ptrauth_restricted_intptr_qualifier)
#define __ptrauth_objc_isa_uintptr \
__ptrauth_restricted_intptr(ptrauth_key_objc_isa_pointer, 1, \
__ptrauth_isa_discriminator)
#else
#define __ptrauth_objc_isa_uintptr \
__ptrauth(ptrauth_key_objc_isa_pointer, 1, __ptrauth_isa_discriminator)
#endif
#define __ptrauth_objc_super_pointer \
__ptrauth(ptrauth_key_objc_super_pointer, 1, __ptrauth_super_discriminator)
#define __ptrauth_objc_sel_discriminator 0x57c2
#if __has_feature(ptrauth_objc_interface_sel)
#define __ptrauth_objc_sel \
__ptrauth(ptrauth_key_objc_sel_pointer, 1, __ptrauth_objc_sel_discriminator)
#else
#define __ptrauth_objc_sel
#endif
#define __ptrauth_objc_class_ro_discriminator 0x61f8
#define __ptrauth_objc_class_ro \
__ptrauth(ptrauth_key_objc_class_ro_pointer, 1, \
__ptrauth_objc_class_ro_discriminator)
#else
#define ptrauth_strip(__value, __key) \
@ -331,6 +384,10 @@ typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t;
#define ptrauth_cxx_vtable_pointer(key, address_discrimination, \
extra_discrimination...)
#define __ptrauth_objc_isa_pointer
#define __ptrauth_objc_isa_uintptr
#define __ptrauth_objc_super_pointer
#endif /* __has_feature(ptrauth_intrinsics) */
#endif /* __PTRAUTH_H */
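
A hedged illustration of the new qualifier macros (the struct is hypothetical;
on targets without ptrauth_intrinsics the macro expands to nothing and the
field is an ordinary pointer):

#include <ptrauth.h>

struct object_header {
  /* On arm64e this pointer is signed with ptrauth_key_objc_isa_pointer,
     address discrimination enabled, and constant discriminator 0x6AE1. */
  const void *__ptrauth_objc_isa_pointer isa;
};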

View File

@ -24,13 +24,13 @@ static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_abs(long a) {
return __builtin_abs(a);
}
static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_slet(long a, long b) {
return __builtin_riscv_cv_alu_slet(a, b);
static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_sle(long a, long b) {
return __builtin_riscv_cv_alu_sle(a, b);
}
static __inline__ long __DEFAULT_FN_ATTRS
__riscv_cv_alu_sletu(unsigned long a, unsigned long b) {
return __builtin_riscv_cv_alu_sletu(a, b);
__riscv_cv_alu_sleu(unsigned long a, unsigned long b) {
return __builtin_riscv_cv_alu_sleu(a, b);
}
static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_min(long a, long b) {
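
For callers migrating across the rename, a minimal sketch (assuming a CORE-V
target with the Xcvalu extension enabled; the include name is taken to be
riscv_corev_alu.h):

#include <riscv_corev_alu.h>

long le_flag(long a, long b) {
  /* Formerly __riscv_cv_alu_slet; returns 1 when a <= b, else 0. */
  return __riscv_cv_alu_sle(a, b);
}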

View File

@ -49,7 +49,6 @@ enum __RISCV_FRM {
#define __riscv_vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
#define __riscv_vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
#if __riscv_v_elen >= 64
#define __riscv_vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
#define __riscv_vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
#define __riscv_vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
@ -58,7 +57,6 @@ enum __RISCV_FRM {
#define __riscv_vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
#define __riscv_vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
#define __riscv_vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
#endif
#define __riscv_vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
#define __riscv_vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
@ -78,7 +76,6 @@ enum __RISCV_FRM {
#define __riscv_vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
#define __riscv_vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
#if __riscv_v_elen >= 64
#define __riscv_vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
#define __riscv_vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
#define __riscv_vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
@ -87,7 +84,6 @@ enum __RISCV_FRM {
#define __riscv_vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
#define __riscv_vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
#define __riscv_vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
#endif
enum __RISCV_VXRM {
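
The fractional-LMUL variants that require ELEN >= 64 (e8mf8, e16mf4, e32mf2)
now sit behind the __riscv_v_elen guard. A sketch of guard-aware usage,
assuming a vector-enabled RISC-V target (the function name is hypothetical):

#include <riscv_vector.h>

size_t choose_vl(size_t avl) {
#if __riscv_v_elen >= 64
  return __riscv_vsetvl_e8mf8(avl); /* only defined when ELEN >= 64 */
#else
  return __riscv_vsetvl_e8mf4(avl); /* available without the guard */
#endif
}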

View File

@ -48,7 +48,8 @@
/// combining functions and rounding constants (not specified here).
/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state.
#define _mm_sha1rnds4_epu32(V1, V2, M) \
__builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M))
((__m128i)__builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), \
(__v4si)(__m128i)(V2), (M)))
/// Calculates the SHA-1 state variable E from the SHA-1 state variables in
/// the 128-bit vector of [4 x i32] in \a __X, adds that to the next set of
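
Because the macro result now carries an explicit (__m128i) cast, it can feed
other intrinsics directly without a user-side cast. A minimal sketch (assuming
compilation with -msha; the helper is hypothetical):

#include <immintrin.h>

__m128i sha1_round(__m128i abcd, __m128i msg) {
  return _mm_xor_si128(_mm_sha1rnds4_epu32(abcd, msg, 0), msg);
}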

15
lib/include/stdcountof.h vendored Normal file
View File

@ -0,0 +1,15 @@
/*===---- stdcountof.h - Standard header for countof -----------------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __STDCOUNTOF_H
#define __STDCOUNTOF_H
#define countof _Countof
#endif /* __STDCOUNTOF_H */
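
A minimal sketch, assuming a compiler that implements the C2y _Countof
operator underlying this header:

#include <stdcountof.h>

static int primes[] = {2, 3, 5, 7, 11};
_Static_assert(countof(primes) == 5, "element count, not byte size");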

147
lib/include/stdint.h vendored
View File

@ -317,166 +317,55 @@ typedef __UINTMAX_TYPE__ uintmax_t;
* integer width that the target implements, so corresponding macros are
* defined below, too.
*
* These macros are defined using the same successive-shrinking approach as
* the type definitions above. It is likewise important that macros are defined
in order of descending width.
*
* Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
* claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
*/
#define __int_c_join(a, b) a ## b
#define __int_c(v, suffix) __int_c_join(v, suffix)
#define __uint_c(v, suffix) __int_c_join(v##U, suffix)
#ifdef __INT64_TYPE__
# undef __int64_c_suffix
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT64_C_SUFFIX__
# define __int64_c_suffix __INT64_C_SUFFIX__
# define __int32_c_suffix __INT64_C_SUFFIX__
# define __int16_c_suffix __INT64_C_SUFFIX__
# define __int8_c_suffix __INT64_C_SUFFIX__
# endif /* __INT64_C_SUFFIX__ */
#endif /* __INT64_TYPE__ */
#ifdef __int_least64_t
# ifdef __int64_c_suffix
# define INT64_C(v) __int_c(v, __int64_c_suffix)
# define UINT64_C(v) __uint_c(v, __int64_c_suffix)
# else
# define INT64_C(v) v
# define UINT64_C(v) v ## U
# endif /* __int64_c_suffix */
#define INT64_C(v) __INT64_C(v)
#define UINT64_C(v) __UINT64_C(v)
#endif /* __int_least64_t */
#ifdef __INT56_TYPE__
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT56_C_SUFFIX__
# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
# define __int32_c_suffix __INT56_C_SUFFIX__
# define __int16_c_suffix __INT56_C_SUFFIX__
# define __int8_c_suffix __INT56_C_SUFFIX__
# else
# define INT56_C(v) v
# define UINT56_C(v) v ## U
# endif /* __INT56_C_SUFFIX__ */
#define INT56_C(v) __INT56_C(v)
#define UINT56_C(v) __UINT56_C(v)
#endif /* __INT56_TYPE__ */
#ifdef __INT48_TYPE__
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT48_C_SUFFIX__
# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
# define __int32_c_suffix __INT48_C_SUFFIX__
# define __int16_c_suffix __INT48_C_SUFFIX__
# define __int8_c_suffix __INT48_C_SUFFIX__
# else
# define INT48_C(v) v
# define UINT48_C(v) v ## U
# endif /* __INT48_C_SUFFIX__ */
#define INT48_C(v) __INT48_C(v)
#define UINT48_C(v) __UINT48_C(v)
#endif /* __INT48_TYPE__ */
#ifdef __INT40_TYPE__
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT40_C_SUFFIX__
# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
# define __int32_c_suffix __INT40_C_SUFFIX__
# define __int16_c_suffix __INT40_C_SUFFIX__
# define __int8_c_suffix __INT40_C_SUFFIX__
# else
# define INT40_C(v) v
# define UINT40_C(v) v ## U
# endif /* __INT40_C_SUFFIX__ */
#define INT40_C(v) __INT40_C(v)
#define UINT40_C(v) __UINT40_C(v)
#endif /* __INT40_TYPE__ */
#ifdef __INT32_TYPE__
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT32_C_SUFFIX__
# define __int32_c_suffix __INT32_C_SUFFIX__
# define __int16_c_suffix __INT32_C_SUFFIX__
# define __int8_c_suffix __INT32_C_SUFFIX__
# endif /* __INT32_C_SUFFIX__ */
#endif /* __INT32_TYPE__ */
#ifdef __int_least32_t
# ifdef __int32_c_suffix
# define INT32_C(v) __int_c(v, __int32_c_suffix)
# define UINT32_C(v) __uint_c(v, __int32_c_suffix)
# else
# define INT32_C(v) v
# define UINT32_C(v) v ## U
# endif /* __int32_c_suffix */
#define INT32_C(v) __INT32_C(v)
#define UINT32_C(v) __UINT32_C(v)
#endif /* __int_least32_t */
#ifdef __INT24_TYPE__
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT24_C_SUFFIX__
# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
# define __int16_c_suffix __INT24_C_SUFFIX__
# define __int8_c_suffix __INT24_C_SUFFIX__
# else
# define INT24_C(v) v
# define UINT24_C(v) v ## U
# endif /* __INT24_C_SUFFIX__ */
#define INT24_C(v) __INT24_C(v)
#define UINT24_C(v) __UINT24_C(v)
#endif /* __INT24_TYPE__ */
#ifdef __INT16_TYPE__
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT16_C_SUFFIX__
# define __int16_c_suffix __INT16_C_SUFFIX__
# define __int8_c_suffix __INT16_C_SUFFIX__
# endif /* __INT16_C_SUFFIX__ */
#endif /* __INT16_TYPE__ */
#ifdef __int_least16_t
# ifdef __int16_c_suffix
# define INT16_C(v) __int_c(v, __int16_c_suffix)
# define UINT16_C(v) __uint_c(v, __int16_c_suffix)
# else
# define INT16_C(v) v
# define UINT16_C(v) v ## U
# endif /* __int16_c_suffix */
#define INT16_C(v) __INT16_C(v)
#define UINT16_C(v) __UINT16_C(v)
#endif /* __int_least16_t */
#ifdef __INT8_TYPE__
# undef __int8_c_suffix
# ifdef __INT8_C_SUFFIX__
# define __int8_c_suffix __INT8_C_SUFFIX__
# endif /* __INT8_C_SUFFIX__ */
#endif /* __INT8_TYPE__ */
#ifdef __int_least8_t
# ifdef __int8_c_suffix
# define INT8_C(v) __int_c(v, __int8_c_suffix)
# define UINT8_C(v) __uint_c(v, __int8_c_suffix)
# else
# define INT8_C(v) v
# define UINT8_C(v) v ## U
# endif /* __int8_c_suffix */
#define INT8_C(v) __INT8_C(v)
#define UINT8_C(v) __UINT8_C(v)
#endif /* __int_least8_t */
@ -938,8 +827,8 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#endif
/* 7.18.4.2 Macros for greatest-width integer constants. */
#define INTMAX_C(v) __int_c(v, __INTMAX_C_SUFFIX__)
#define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
#define INTMAX_C(v) __INTMAX_C(v)
#define UINTMAX_C(v) __UINTMAX_C(v)
/* C23 7.22.3.x Width of other integer types. */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
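
The observable values are unchanged; the INTn_C/UINTn_C macros now expand
through the compiler-provided __INTn_C/__UINTn_C macros instead of the
suffix-pasting helpers. For example:

#include <stdint.h>

static const uint64_t high_bit = UINT64_C(1) << 63;
static const int64_t  max64    = INT64_C(9223372036854775807);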

View File

@ -7,6 +7,9 @@
*===-----------------------------------------------------------------------===
*/
#ifndef _VECINTRIN_H
#define _VECINTRIN_H
#if defined(__s390x__) && defined(__VEC__)
#define __ATTRS_ai __attribute__((__always_inline__))
@ -12861,3 +12864,5 @@ vec_search_string_until_zero_cc(__vector unsigned int __a,
#error "Use -fzvector to enable vector extensions"
#endif
#endif /* _VECINTRIN_H */

View File

@ -10,33 +10,19 @@
#ifndef __X86GPRINTRIN_H
#define __X86GPRINTRIN_H
#if !defined(__SCE__) || __has_feature(modules) || defined(__HRESET__)
#include <hresetintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__UINTR__)
#include <uintrintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__USERMSR__)
#include <usermsrintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CRC32__)
#include <crc32intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHI__)
#include <prfchiintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__RAOINT__)
#include <raointintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CMPCCXADD__)
#include <cmpccxaddintrin.h>
#endif
#if defined(__i386__)
#define __SAVE_GPRBX "mov {%%ebx, %%eax |eax, ebx};"

View File

@ -14,40 +14,22 @@
#include <immintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHW__)
#include <prfchwintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE4A__)
#include <ammintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA4__)
#include <fma4intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__XOP__)
#include <xopintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__TBM__)
#include <tbmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__LWP__)
#include <lwpintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__MWAITX__)
#include <mwaitxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CLZERO__)
#include <clzerointrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPRU__)
#include <rdpruintrin.h>
#endif
#endif /* __X86INTRIN_H */

View File

@ -2198,8 +2198,9 @@ _mm_storer_ps(float *__p, __m128 __a)
#define _MM_HINT_NTA 0
#ifndef _MSC_VER
/* FIXME: We have to #define this because "sel" must be a constant integer, and
Sema doesn't do any form of constant propagation yet. */
// If _MSC_VER is defined, we use the builtin variant of _mm_prefetch.
// Otherwise, we provide this macro, which includes a cast, allowing the user
// to pass a pointer of any type. The builtin _mm_prefetch accepts a char
// pointer to match MSVC.
/// Loads one cache line of data from the specified address to a location
/// closer to the processor.