Merge pull request #25055 from ziglang/llvm21

LLVM 21
This commit is contained in:
Alex Rønne Petersen 2025-08-30 21:00:53 +02:00 committed by GitHub
commit 151314346d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
773 changed files with 52047 additions and 40073 deletions

View File

@ -133,9 +133,9 @@ else()
set(ZIG_SYSTEM_LIBCXX "stdc++" CACHE STRING "system libcxx name for build.zig")
endif()
find_package(llvm 20)
find_package(clang 20)
find_package(lld 20)
find_package(llvm 21)
find_package(clang 21)
find_package(lld 21)
if(ZIG_STATIC_ZLIB)
if (MSVC)

View File

@ -1176,7 +1176,6 @@ const clang_libs = [_][]const u8{
"clangBasic",
"clangEdit",
"clangLex",
"clangARCMigrate",
"clangRewriteFrontend",
"clangRewrite",
"clangCrossTU",
@ -1322,30 +1321,31 @@ const llvm_libs = [_][]const u8{
"LLVMOrcTargetProcess",
"LLVMOrcShared",
"LLVMDWP",
"LLVMDWARFCFIChecker",
"LLVMDebugInfoLogicalView",
"LLVMDebugInfoGSYM",
"LLVMOption",
"LLVMObjectYAML",
"LLVMObjCopy",
"LLVMMCA",
"LLVMMCDisassembler",
"LLVMLTO",
"LLVMPasses",
"LLVMHipStdPar",
"LLVMCFGuard",
"LLVMCoroutines",
"LLVMipo",
"LLVMVectorize",
"LLVMSandboxIR",
"LLVMLinker",
"LLVMInstrumentation",
"LLVMFrontendOpenMP",
"LLVMFrontendOffloading",
"LLVMFrontendOpenACC",
"LLVMFrontendHLSL",
"LLVMFrontendDriver",
"LLVMFrontendAtomic",
"LLVMExtensions",
"LLVMPasses",
"LLVMHipStdPar",
"LLVMCoroutines",
"LLVMCFGuard",
"LLVMipo",
"LLVMInstrumentation",
"LLVMVectorize",
"LLVMSandboxIR",
"LLVMLinker",
"LLVMFrontendOpenMP",
"LLVMFrontendDirective",
"LLVMFrontendAtomic",
"LLVMFrontendOffloading",
"LLVMObjectYAML",
"LLVMDWARFLinkerParallel",
"LLVMDWARFLinkerClassic",
"LLVMDWARFLinker",
@ -1374,7 +1374,9 @@ const llvm_libs = [_][]const u8{
"LLVMDebugInfoPDB",
"LLVMDebugInfoMSF",
"LLVMDebugInfoCodeView",
"LLVMDebugInfoGSYM",
"LLVMDebugInfoDWARF",
"LLVMDebugInfoDWARFLowLevel",
"LLVMObject",
"LLVMTextAPI",
"LLVMMCParser",

View File

@ -8,7 +8,7 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

View File

@ -8,7 +8,7 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

View File

@ -9,7 +9,7 @@ set -e
ZIGDIR="$PWD"
TARGET="$ARCH-macos-none"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
PREFIX="$HOME/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

View File

@ -9,7 +9,7 @@ set -e
ZIGDIR="$PWD"
TARGET="$ARCH-macos-none"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
PREFIX="$HOME/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

View File

@ -1,5 +1,5 @@
$TARGET = "$($Env:ARCH)-windows-gnu"
$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
$MCPU = "baseline"
$ZIG_LLVM_CLANG_LLD_URL = "https://ziglang.org/deps/$ZIG_LLVM_CLANG_LLD_NAME.zip"
$PREFIX_PATH = "$(Get-Location)\..\$ZIG_LLVM_CLANG_LLD_NAME"

View File

@ -8,7 +8,7 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="spacemit_x60"
CACHE_BASENAME="zig+llvm+lld+clang-riscv64-linux-musl-0.15.0-dev.929+31e46be74"
CACHE_BASENAME="zig+llvm+lld+clang-riscv64-linux-musl-0.16.0-dev.104+689461e31"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

View File

@ -8,7 +8,7 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="spacemit_x60"
CACHE_BASENAME="zig+llvm+lld+clang-riscv64-linux-musl-0.15.0-dev.929+31e46be74"
CACHE_BASENAME="zig+llvm+lld+clang-riscv64-linux-musl-0.16.0-dev.104+689461e31"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

View File

@ -8,7 +8,7 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

View File

@ -8,7 +8,7 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

View File

@ -8,7 +8,7 @@ set -e
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"

View File

@ -6,7 +6,7 @@ set -e
ZIGDIR="$PWD"
TARGET="$ARCH-macos-none"
MCPU="baseline"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
PREFIX="$HOME/$CACHE_BASENAME"
JOBS="-j3"
ZIG="$PREFIX/bin/zig"

View File

@ -1,5 +1,5 @@
$TARGET = "$($Env:ARCH)-windows-gnu"
$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
$MCPU = "baseline"
$ZIG_LLVM_CLANG_LLD_URL = "https://ziglang.org/deps/$ZIG_LLVM_CLANG_LLD_NAME.zip"
$PREFIX_PATH = "$($Env:USERPROFILE)\$ZIG_LLVM_CLANG_LLD_NAME"

View File

@ -1,5 +1,5 @@
$TARGET = "$($Env:ARCH)-windows-gnu"
$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
$ZIG_LLVM_CLANG_LLD_NAME = "zig+llvm+lld+clang-$TARGET-0.16.0-dev.104+689461e31"
$MCPU = "baseline"
$ZIG_LLVM_CLANG_LLD_URL = "https://ziglang.org/deps/$ZIG_LLVM_CLANG_LLD_NAME.zip"
$PREFIX_PATH = "$($Env:USERPROFILE)\$ZIG_LLVM_CLANG_LLD_NAME"

View File

@ -17,10 +17,10 @@ find_path(CLANG_INCLUDE_DIRS NAMES clang/Frontend/ASTUnit.h
if(${LLVM_LINK_MODE} STREQUAL "shared")
find_library(CLANG_LIBRARIES
NAMES
libclang-cpp.so.20
libclang-cpp.so.20.1
clang-cpp-20.0
clang-cpp200
libclang-cpp.so.21
libclang-cpp.so.21.1
clang-cpp-21.0
clang-cpp210
clang-cpp
NAMES_PER_DIR
HINTS "${LLVM_LIBDIRS}"
@ -60,7 +60,6 @@ else()
FIND_AND_ADD_CLANG_LIB(clangBasic)
FIND_AND_ADD_CLANG_LIB(clangEdit)
FIND_AND_ADD_CLANG_LIB(clangLex)
FIND_AND_ADD_CLANG_LIB(clangARCMigrate)
FIND_AND_ADD_CLANG_LIB(clangRewriteFrontend)
FIND_AND_ADD_CLANG_LIB(clangRewrite)
FIND_AND_ADD_CLANG_LIB(clangCrossTU)

View File

@ -9,23 +9,23 @@
find_path(LLD_INCLUDE_DIRS NAMES lld/Common/Driver.h
HINTS ${LLVM_INCLUDE_DIRS}
PATHS
/usr/lib/llvm-20/include
/usr/local/llvm200/include
/usr/local/llvm20/include
/usr/local/opt/lld@20/include
/opt/homebrew/opt/lld@20/include
/home/linuxbrew/.linuxbrew/opt/lld@20/include
/usr/lib/llvm-21/include
/usr/local/llvm210/include
/usr/local/llvm21/include
/usr/local/opt/lld@21/include
/opt/homebrew/opt/lld@21/include
/home/linuxbrew/.linuxbrew/opt/lld@21/include
/mingw64/include)
find_library(LLD_LIBRARY NAMES lld-20.0 lld200 lld NAMES_PER_DIR
find_library(LLD_LIBRARY NAMES lld-21.0 lld210 lld NAMES_PER_DIR
HINTS ${LLVM_LIBDIRS}
PATHS
/usr/lib/llvm-20/lib
/usr/local/llvm200/lib
/usr/local/llvm20/lib
/usr/local/opt/lld@20/lib
/opt/homebrew/opt/lld@20/lib
/home/linuxbrew/.linuxbrew/opt/lld@20/lib
/usr/lib/llvm-21/lib
/usr/local/llvm210/lib
/usr/local/llvm21/lib
/usr/local/opt/lld@21/lib
/opt/homebrew/opt/lld@21/lib
/home/linuxbrew/.linuxbrew/opt/lld@21/lib
)
if(EXISTS ${LLD_LIBRARY})
set(LLD_LIBRARIES ${LLD_LIBRARY})
@ -36,12 +36,12 @@ else()
HINTS ${LLVM_LIBDIRS}
PATHS
${LLD_LIBDIRS}
/usr/lib/llvm-20/lib
/usr/local/llvm200/lib
/usr/local/llvm20/lib
/usr/local/opt/lld@20/lib
/opt/homebrew/opt/lld@20/lib
/home/linuxbrew/.linuxbrew/opt/lld@20/lib
/usr/lib/llvm-21/lib
/usr/local/llvm210/lib
/usr/local/llvm21/lib
/usr/local/opt/lld@21/lib
/opt/homebrew/opt/lld@21/lib
/home/linuxbrew/.linuxbrew/opt/lld@21/lib
/mingw64/lib
/c/msys64/mingw64/lib
c:/msys64/mingw64/lib)

View File

@ -17,12 +17,12 @@ if(ZIG_USE_LLVM_CONFIG)
# terminate when the right LLVM version is not found.
unset(LLVM_CONFIG_EXE CACHE)
find_program(LLVM_CONFIG_EXE
NAMES llvm-config-20 llvm-config-20.0 llvm-config200 llvm-config20 llvm-config NAMES_PER_DIR
NAMES llvm-config-21 llvm-config-21.0 llvm-config210 llvm-config21 llvm-config NAMES_PER_DIR
PATHS
"/mingw64/bin"
"/c/msys64/mingw64/bin"
"c:/msys64/mingw64/bin"
"C:/Libraries/llvm-20.0.0/bin")
"C:/Libraries/llvm-21.0.0/bin")
if ("${LLVM_CONFIG_EXE}" STREQUAL "LLVM_CONFIG_EXE-NOTFOUND")
if (NOT LLVM_CONFIG_ERROR_MESSAGES STREQUAL "")
@ -40,9 +40,9 @@ if(ZIG_USE_LLVM_CONFIG)
OUTPUT_STRIP_TRAILING_WHITESPACE)
get_filename_component(LLVM_CONFIG_DIR "${LLVM_CONFIG_EXE}" DIRECTORY)
if("${LLVM_CONFIG_VERSION}" VERSION_LESS 20 OR "${LLVM_CONFIG_VERSION}" VERSION_EQUAL 21 OR "${LLVM_CONFIG_VERSION}" VERSION_GREATER 21)
if("${LLVM_CONFIG_VERSION}" VERSION_LESS 21 OR "${LLVM_CONFIG_VERSION}" VERSION_EQUAL 22 OR "${LLVM_CONFIG_VERSION}" VERSION_GREATER 22)
# Save the error message, in case this is the last llvm-config we find
list(APPEND LLVM_CONFIG_ERROR_MESSAGES "expected LLVM 20.x but found ${LLVM_CONFIG_VERSION} using ${LLVM_CONFIG_EXE}")
list(APPEND LLVM_CONFIG_ERROR_MESSAGES "expected LLVM 21.x but found ${LLVM_CONFIG_VERSION} using ${LLVM_CONFIG_EXE}")
# Ignore this directory and try the search again
list(APPEND CMAKE_IGNORE_PATH "${LLVM_CONFIG_DIR}")
@ -66,9 +66,9 @@ if(ZIG_USE_LLVM_CONFIG)
if (LLVM_CONFIG_ERROR)
# Save the error message, in case this is the last llvm-config we find
if (ZIG_SHARED_LLVM)
list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 20.x found at ${LLVM_CONFIG_EXE} does not support linking as a shared library")
list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 21.x found at ${LLVM_CONFIG_EXE} does not support linking as a shared library")
else()
list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 20.x found at ${LLVM_CONFIG_EXE} does not support linking as a static library")
list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 21.x found at ${LLVM_CONFIG_EXE} does not support linking as a static library")
endif()
# Ignore this directory and try the search again
@ -315,30 +315,31 @@ else()
FIND_AND_ADD_LLVM_LIB(LLVMOrcTargetProcess)
FIND_AND_ADD_LLVM_LIB(LLVMOrcShared)
FIND_AND_ADD_LLVM_LIB(LLVMDWP)
FIND_AND_ADD_LLVM_LIB(LLVMDWARFCFIChecker)
FIND_AND_ADD_LLVM_LIB(LLVMDebugInfoLogicalView)
FIND_AND_ADD_LLVM_LIB(LLVMDebugInfoGSYM)
FIND_AND_ADD_LLVM_LIB(LLVMOption)
FIND_AND_ADD_LLVM_LIB(LLVMObjectYAML)
FIND_AND_ADD_LLVM_LIB(LLVMObjCopy)
FIND_AND_ADD_LLVM_LIB(LLVMMCA)
FIND_AND_ADD_LLVM_LIB(LLVMMCDisassembler)
FIND_AND_ADD_LLVM_LIB(LLVMLTO)
FIND_AND_ADD_LLVM_LIB(LLVMPasses)
FIND_AND_ADD_LLVM_LIB(LLVMHipStdPar)
FIND_AND_ADD_LLVM_LIB(LLVMCFGuard)
FIND_AND_ADD_LLVM_LIB(LLVMCoroutines)
FIND_AND_ADD_LLVM_LIB(LLVMipo)
FIND_AND_ADD_LLVM_LIB(LLVMVectorize)
FIND_AND_ADD_LLVM_LIB(LLVMSandboxIR)
FIND_AND_ADD_LLVM_LIB(LLVMLinker)
FIND_AND_ADD_LLVM_LIB(LLVMInstrumentation)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendOpenMP)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendOffloading)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendOpenACC)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendHLSL)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendDriver)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendAtomic)
FIND_AND_ADD_LLVM_LIB(LLVMExtensions)
FIND_AND_ADD_LLVM_LIB(LLVMPasses)
FIND_AND_ADD_LLVM_LIB(LLVMHipStdPar)
FIND_AND_ADD_LLVM_LIB(LLVMCoroutines)
FIND_AND_ADD_LLVM_LIB(LLVMCFGuard)
FIND_AND_ADD_LLVM_LIB(LLVMipo)
FIND_AND_ADD_LLVM_LIB(LLVMInstrumentation)
FIND_AND_ADD_LLVM_LIB(LLVMVectorize)
FIND_AND_ADD_LLVM_LIB(LLVMSandboxIR)
FIND_AND_ADD_LLVM_LIB(LLVMLinker)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendOpenMP)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendDirective)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendAtomic)
FIND_AND_ADD_LLVM_LIB(LLVMFrontendOffloading)
FIND_AND_ADD_LLVM_LIB(LLVMObjectYAML)
FIND_AND_ADD_LLVM_LIB(LLVMDWARFLinkerParallel)
FIND_AND_ADD_LLVM_LIB(LLVMDWARFLinkerClassic)
FIND_AND_ADD_LLVM_LIB(LLVMDWARFLinker)
@ -367,7 +368,9 @@ else()
FIND_AND_ADD_LLVM_LIB(LLVMDebugInfoPDB)
FIND_AND_ADD_LLVM_LIB(LLVMDebugInfoMSF)
FIND_AND_ADD_LLVM_LIB(LLVMDebugInfoCodeView)
FIND_AND_ADD_LLVM_LIB(LLVMDebugInfoGSYM)
FIND_AND_ADD_LLVM_LIB(LLVMDebugInfoDWARF)
FIND_AND_ADD_LLVM_LIB(LLVMDebugInfoDWARFLowLevel)
FIND_AND_ADD_LLVM_LIB(LLVMObject)
FIND_AND_ADD_LLVM_LIB(LLVMTextAPI)
FIND_AND_ADD_LLVM_LIB(LLVMMCParser)

View File

@ -4,7 +4,6 @@ const common = @import("common.zig");
comptime {
@export(&strcmp, .{ .name = "strcmp", .linkage = common.linkage, .visibility = common.visibility });
@export(&strlen, .{ .name = "strlen", .linkage = common.linkage, .visibility = common.visibility });
@export(&strncmp, .{ .name = "strncmp", .linkage = common.linkage, .visibility = common.visibility });
@export(&strcasecmp, .{ .name = "strcasecmp", .linkage = common.linkage, .visibility = common.visibility });
@export(&strncasecmp, .{ .name = "strncasecmp", .linkage = common.linkage, .visibility = common.visibility });
@ -103,7 +102,3 @@ test strncmp {
try std.testing.expect(strncmp(@ptrCast("b"), @ptrCast("a"), 1) > 0);
try std.testing.expect(strncmp(@ptrCast("\xff"), @ptrCast("\x02"), 1) > 0);
}
fn strlen(s: [*:0]const c_char) callconv(.c) usize {
return std.mem.len(s);
}

View File

@ -667,6 +667,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
.driverkit => "driverkit",
.visionos => "xros",
.serenity => "serenity",
.managarm => "managarm",
.opencl,
.opengl,
.vulkan,

View File

@ -263,6 +263,8 @@ comptime {
_ = @import("compiler_rt/memcmp.zig");
_ = @import("compiler_rt/bcmp.zig");
_ = @import("compiler_rt/ssp.zig");
_ = @import("compiler_rt/strlen.zig");
}
// Temporarily used for uefi until https://github.com/ziglang/zig/issues/21630 is addressed.

View File

@ -123,16 +123,19 @@ pub fn F16T(comptime OtherType: type) type {
.thumbeb,
.aarch64,
.aarch64_be,
.hexagon,
.loongarch32,
.loongarch64,
.nvptx,
.nvptx64,
.riscv32,
.riscv32be,
.riscv64,
.riscv64be,
.s390x,
.spirv32,
.spirv64,
=> f16,
.hexagon => if (builtin.target.cpu.has(.hexagon, .v68)) f16 else u16,
.x86, .x86_64 => if (builtin.target.os.tag.isDarwin()) switch (OtherType) {
// Starting with LLVM 16, Darwin uses different abi for f16
// depending on the type of the other return/argument..???

View File

@ -0,0 +1,10 @@
const std = @import("std");
const common = @import("common.zig");
comptime {
@export(&strlen, .{ .name = "strlen", .linkage = common.linkage, .visibility = common.visibility });
}
fn strlen(s: [*:0]const c_char) callconv(.c) usize {
return std.mem.len(s);
}

217
lib/include/__clang_spirv_builtins.h vendored Normal file
View File

@ -0,0 +1,217 @@
/*===---- spirv_builtin_vars.h - SPIR-V built-in ---------------------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __SPIRV_BUILTIN_VARS_H
#define __SPIRV_BUILTIN_VARS_H
#if __cplusplus >= 201103L
#define __SPIRV_NOEXCEPT noexcept
#else
#define __SPIRV_NOEXCEPT
#endif
#pragma push_macro("__size_t")
#pragma push_macro("__uint32_t")
#pragma push_macro("__uint64_t")
#define __size_t __SIZE_TYPE__
#define __uint32_t __UINT32_TYPE__
#define __SPIRV_overloadable __attribute__((overloadable))
#define __SPIRV_convergent __attribute__((convergent))
#define __SPIRV_inline __attribute__((always_inline))
#define __global __attribute__((opencl_global))
#define __local __attribute__((opencl_local))
#define __private __attribute__((opencl_private))
#define __constant __attribute__((opencl_constant))
#ifdef __SYCL_DEVICE_ONLY__
#define __generic
#else
#define __generic __attribute__((opencl_generic))
#endif
// Check if SPIR-V builtins are supported.
// As the translator doesn't use the LLVM intrinsics (which would be emitted if
// we use the SPIR-V builtins) we can't rely on the SPIRV32/SPIRV64 etc macros
// to establish if we can use the builtin alias. We disable builtin altogether
// if we do not intent to use the backend. So instead of use target macros, rely
// on a __has_builtin test.
#if (__has_builtin(__builtin_spirv_num_workgroups))
#define __SPIRV_BUILTIN_ALIAS(builtin) \
__attribute__((clang_builtin_alias(builtin)))
#else
#define __SPIRV_BUILTIN_ALIAS(builtin)
#endif
// Builtin IDs and sizes
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_num_workgroups) __size_t
__spirv_NumWorkgroups(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_workgroup_size) __size_t
__spirv_WorkgroupSize(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_workgroup_id) __size_t
__spirv_WorkgroupId(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_local_invocation_id) __size_t
__spirv_LocalInvocationId(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_global_invocation_id) __size_t
__spirv_GlobalInvocationId(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_global_size) __size_t
__spirv_GlobalSize(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_global_offset) __size_t
__spirv_GlobalOffset(int);
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_subgroup_size) __uint32_t
__spirv_SubgroupSize();
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_subgroup_max_size) __uint32_t
__spirv_SubgroupMaxSize();
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_num_subgroups) __uint32_t
__spirv_NumSubgroups();
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_subgroup_id) __uint32_t
__spirv_SubgroupId();
extern __SPIRV_BUILTIN_ALIAS(__builtin_spirv_subgroup_local_invocation_id)
__uint32_t __spirv_SubgroupLocalInvocationId();
// OpGenericCastToPtrExplicit
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__global void *__spirv_GenericCastToPtrExplicit_ToGlobal(__generic void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__global const void *
__spirv_GenericCastToPtrExplicit_ToGlobal(__generic const void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__global volatile void *
__spirv_GenericCastToPtrExplicit_ToGlobal(__generic volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__global const volatile void *
__spirv_GenericCastToPtrExplicit_ToGlobal(__generic const volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__local void *__spirv_GenericCastToPtrExplicit_ToLocal(__generic void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__local const void *
__spirv_GenericCastToPtrExplicit_ToLocal(__generic const void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__local volatile void *
__spirv_GenericCastToPtrExplicit_ToLocal(__generic volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__local const volatile void *
__spirv_GenericCastToPtrExplicit_ToLocal(__generic const volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__private void *
__spirv_GenericCastToPtrExplicit_ToPrivate(__generic void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__private const void *
__spirv_GenericCastToPtrExplicit_ToPrivate(__generic const void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__private volatile void *
__spirv_GenericCastToPtrExplicit_ToPrivate(__generic volatile void *,
int) __SPIRV_NOEXCEPT;
extern __SPIRV_overloadable
__SPIRV_BUILTIN_ALIAS(__builtin_spirv_generic_cast_to_ptr_explicit)
__private const volatile void *
__spirv_GenericCastToPtrExplicit_ToPrivate(__generic const volatile void *,
int) __SPIRV_NOEXCEPT;
// OpGenericCastToPtr
static __SPIRV_overloadable __SPIRV_inline __global void *
__spirv_GenericCastToPtr_ToGlobal(__generic void *p, int) __SPIRV_NOEXCEPT {
return (__global void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __global const void *
__spirv_GenericCastToPtr_ToGlobal(__generic const void *p,
int) __SPIRV_NOEXCEPT {
return (__global const void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __global volatile void *
__spirv_GenericCastToPtr_ToGlobal(__generic volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__global volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __global const volatile void *
__spirv_GenericCastToPtr_ToGlobal(__generic const volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__global const volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __local void *
__spirv_GenericCastToPtr_ToLocal(__generic void *p, int) __SPIRV_NOEXCEPT {
return (__local void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __local const void *
__spirv_GenericCastToPtr_ToLocal(__generic const void *p,
int) __SPIRV_NOEXCEPT {
return (__local const void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __local volatile void *
__spirv_GenericCastToPtr_ToLocal(__generic volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__local volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __local const volatile void *
__spirv_GenericCastToPtr_ToLocal(__generic const volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__local const volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __private void *
__spirv_GenericCastToPtr_ToPrivate(__generic void *p, int) __SPIRV_NOEXCEPT {
return (__private void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __private const void *
__spirv_GenericCastToPtr_ToPrivate(__generic const void *p,
int) __SPIRV_NOEXCEPT {
return (__private const void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __private volatile void *
__spirv_GenericCastToPtr_ToPrivate(__generic volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__private volatile void *)p;
}
static __SPIRV_overloadable __SPIRV_inline __private const volatile void *
__spirv_GenericCastToPtr_ToPrivate(__generic const volatile void *p,
int) __SPIRV_NOEXCEPT {
return (__private const volatile void *)p;
}
#pragma pop_macro("__size_t")
#pragma pop_macro("__uint32_t")
#pragma pop_macro("__uint64_t")
#undef __SPIRV_overloadable
#undef __SPIRV_convergent
#undef __SPIRV_inline
#undef __global
#undef __local
#undef __constant
#undef __generic
#undef __SPIRV_BUILTIN_ALIAS
#undef __SPIRV_NOEXCEPT
#endif /* __SPIRV_BUILTIN_VARS_H */

View File

@ -10,8 +10,8 @@
#ifndef va_arg
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
/* C23 does not require the second parameter for va_start. */
#define va_start(ap, ...) __builtin_va_start(ap, 0)
/* C23 uses a special builtin. */
#define va_start(...) __builtin_c23_va_start(__VA_ARGS__)
#else
/* Versions before C23 do require the second parameter. */
#define va_start(ap, param) __builtin_va_start(ap, param)

161
lib/include/altivec.h vendored
View File

@ -17525,70 +17525,73 @@ vec_bperm(vector unsigned long long __a, vector unsigned char __b) {
/* vec_reve */
static inline __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
static __inline__ __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector signed char vec_reve(vector signed char __a) {
static __inline__ __ATTRS_o_ai vector signed char
vec_reve(vector signed char __a) {
return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector unsigned char
static __inline__ __ATTRS_o_ai vector unsigned char
vec_reve(vector unsigned char __a) {
return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
static __inline__ __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector signed int vec_reve(vector signed int __a) {
static __inline__ __ATTRS_o_ai vector signed int
vec_reve(vector signed int __a) {
return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector unsigned int
static __inline__ __ATTRS_o_ai vector unsigned int
vec_reve(vector unsigned int __a) {
return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector bool short vec_reve(vector bool short __a) {
static __inline__ __ATTRS_o_ai vector bool short
vec_reve(vector bool short __a) {
return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector signed short
static __inline__ __ATTRS_o_ai vector signed short
vec_reve(vector signed short __a) {
return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector unsigned short
static __inline__ __ATTRS_o_ai vector unsigned short
vec_reve(vector unsigned short __a) {
return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
}
static inline __ATTRS_o_ai vector float vec_reve(vector float __a) {
static __inline__ __ATTRS_o_ai vector float vec_reve(vector float __a) {
return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
}
#ifdef __VSX__
static inline __ATTRS_o_ai vector bool long long
static __inline__ __ATTRS_o_ai vector bool long long
vec_reve(vector bool long long __a) {
return __builtin_shufflevector(__a, __a, 1, 0);
}
static inline __ATTRS_o_ai vector signed long long
static __inline__ __ATTRS_o_ai vector signed long long
vec_reve(vector signed long long __a) {
return __builtin_shufflevector(__a, __a, 1, 0);
}
static inline __ATTRS_o_ai vector unsigned long long
static __inline__ __ATTRS_o_ai vector unsigned long long
vec_reve(vector unsigned long long __a) {
return __builtin_shufflevector(__a, __a, 1, 0);
}
static inline __ATTRS_o_ai vector double vec_reve(vector double __a) {
static __inline__ __ATTRS_o_ai vector double vec_reve(vector double __a) {
return __builtin_shufflevector(__a, __a, 1, 0);
}
#endif
@ -17721,42 +17724,42 @@ typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
typedef vector float unaligned_vec_float __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed char vec_xl(ptrdiff_t __offset,
const signed char *__ptr) {
static __inline__ __ATTRS_o_ai vector signed char
vec_xl(ptrdiff_t __offset, const signed char *__ptr) {
return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
static __inline__ __ATTRS_o_ai vector unsigned char
vec_xl(ptrdiff_t __offset, const unsigned char *__ptr) {
return *(unaligned_vec_uchar*)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed short
static __inline__ __ATTRS_o_ai vector signed short
vec_xl(ptrdiff_t __offset, const signed short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sshort *)__addr;
}
static inline __ATTRS_o_ai vector unsigned short
static __inline__ __ATTRS_o_ai vector unsigned short
vec_xl(ptrdiff_t __offset, const unsigned short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ushort *)__addr;
}
static inline __ATTRS_o_ai vector signed int vec_xl(ptrdiff_t __offset,
const signed int *__ptr) {
static __inline__ __ATTRS_o_ai vector signed int
vec_xl(ptrdiff_t __offset, const signed int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sint *)__addr;
}
static inline __ATTRS_o_ai vector unsigned int
static __inline__ __ATTRS_o_ai vector unsigned int
vec_xl(ptrdiff_t __offset, const unsigned int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_uint *)__addr;
}
static inline __ATTRS_o_ai vector float vec_xl(ptrdiff_t __offset,
const float *__ptr) {
static __inline__ __ATTRS_o_ai vector float vec_xl(ptrdiff_t __offset,
const float *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_float *)__addr;
}
@ -17766,20 +17769,20 @@ typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
typedef vector double unaligned_vec_double __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed long long
static __inline__ __ATTRS_o_ai vector signed long long
vec_xl(ptrdiff_t __offset, const signed long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sll *)__addr;
}
static inline __ATTRS_o_ai vector unsigned long long
static __inline__ __ATTRS_o_ai vector unsigned long long
vec_xl(ptrdiff_t __offset, const unsigned long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ull *)__addr;
}
static inline __ATTRS_o_ai vector double vec_xl(ptrdiff_t __offset,
const double *__ptr) {
static __inline__ __ATTRS_o_ai vector double vec_xl(ptrdiff_t __offset,
const double *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_double *)__addr;
}
@ -17790,13 +17793,13 @@ static inline __ATTRS_o_ai vector double vec_xl(ptrdiff_t __offset,
typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
typedef vector unsigned __int128 unaligned_vec_ui128
__attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
static __inline__ __ATTRS_o_ai vector signed __int128
vec_xl(ptrdiff_t __offset, const signed __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_si128 *)__addr;
}
static inline __ATTRS_o_ai vector unsigned __int128
static __inline__ __ATTRS_o_ai vector unsigned __int128
vec_xl(ptrdiff_t __offset, const unsigned __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ui128 *)__addr;
@ -17991,64 +17994,64 @@ vec_load_splats(unsigned long long __offset, const float *__ptr) {
#define vec_xstd2 vec_xst
#define vec_xstw4 vec_xst
static inline __ATTRS_o_ai void
static __inline__ __ATTRS_o_ai void
vec_xst(vector signed char __vec, ptrdiff_t __offset, signed char *__ptr) {
*(unaligned_vec_schar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
static __inline__ __ATTRS_o_ai void
vec_xst(vector unsigned char __vec, ptrdiff_t __offset, unsigned char *__ptr) {
*(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
static __inline__ __ATTRS_o_ai void
vec_xst(vector signed short __vec, ptrdiff_t __offset, signed short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_sshort *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
ptrdiff_t __offset,
unsigned short *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
ptrdiff_t __offset,
unsigned short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_ushort *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
ptrdiff_t __offset, signed int *__ptr) {
static __inline__ __ATTRS_o_ai void
vec_xst(vector signed int __vec, ptrdiff_t __offset, signed int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_sint *)__addr = __vec;
}
static inline __ATTRS_o_ai void
static __inline__ __ATTRS_o_ai void
vec_xst(vector unsigned int __vec, ptrdiff_t __offset, unsigned int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_uint *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector float __vec, ptrdiff_t __offset,
float *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst(vector float __vec,
ptrdiff_t __offset, float *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_float *)__addr = __vec;
}
#ifdef __VSX__
static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
ptrdiff_t __offset,
signed long long *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst(vector signed long long __vec,
ptrdiff_t __offset,
signed long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_sll *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
ptrdiff_t __offset,
unsigned long long *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
ptrdiff_t __offset,
unsigned long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_ull *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector double __vec, ptrdiff_t __offset,
double *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst(vector double __vec,
ptrdiff_t __offset, double *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_double *)__addr = __vec;
}
@ -18056,16 +18059,16 @@ static inline __ATTRS_o_ai void vec_xst(vector double __vec, ptrdiff_t __offset,
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
defined(__SIZEOF_INT128__)
static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
ptrdiff_t __offset,
signed __int128 *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
ptrdiff_t __offset,
signed __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_si128 *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
ptrdiff_t __offset,
unsigned __int128 *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
ptrdiff_t __offset,
unsigned __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_ui128 *)__addr = __vec;
}
@ -18075,51 +18078,51 @@ static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
#if defined(__POWER10_VECTOR__) && defined(__VSX__) && \
defined(__SIZEOF_INT128__)
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed char *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed char *__ptr) {
*(__ptr + __offset) = (signed char)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
ptrdiff_t __offset,
unsigned char *__ptr) {
static __inline__ __ATTRS_o_ai void
vec_xst_trunc(vector unsigned __int128 __vec, ptrdiff_t __offset,
unsigned char *__ptr) {
*(__ptr + __offset) = (unsigned char)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed short *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed short *__ptr) {
*(__ptr + __offset) = (signed short)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
ptrdiff_t __offset,
unsigned short *__ptr) {
static __inline__ __ATTRS_o_ai void
vec_xst_trunc(vector unsigned __int128 __vec, ptrdiff_t __offset,
unsigned short *__ptr) {
*(__ptr + __offset) = (unsigned short)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed int *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed int *__ptr) {
*(__ptr + __offset) = (signed int)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
ptrdiff_t __offset,
unsigned int *__ptr) {
static __inline__ __ATTRS_o_ai void
vec_xst_trunc(vector unsigned __int128 __vec, ptrdiff_t __offset,
unsigned int *__ptr) {
*(__ptr + __offset) = (unsigned int)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed long long *__ptr) {
static __inline__ __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
ptrdiff_t __offset,
signed long long *__ptr) {
*(__ptr + __offset) = (signed long long)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
ptrdiff_t __offset,
unsigned long long *__ptr) {
static __inline__ __ATTRS_o_ai void
vec_xst_trunc(vector unsigned __int128 __vec, ptrdiff_t __offset,
unsigned long long *__ptr) {
*(__ptr + __offset) = (unsigned long long)__vec[0];
}
#endif

View File

@ -228,7 +228,7 @@
/// dst.byte[i] := a.row[row_index].byte[row_chunk+i]
/// ENDFOR
/// \endcode
#define _tile_movrow(a, b) __builtin_ia32_tilemovrow(a, b)
#define _tile_movrow(a, b) ((__m512i)__builtin_ia32_tilemovrow(a, b))
/// This is internal intrinsic. C/C++ user should avoid calling it directly.

View File

@ -135,9 +135,8 @@ _tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_COMPLEX
static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,
__tile1024i src1) {
static __inline__ void __DEFAULT_FN_ATTRS_COMPLEX
__tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0, __tile1024i src1) {
dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col,
dst->tile, src0.tile, src1.tile);
}
@ -158,9 +157,8 @@ static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_COMPLEX
static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0,
__tile1024i src1) {
static __inline__ void __DEFAULT_FN_ATTRS_COMPLEX
__tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0, __tile1024i src1) {
dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col,
dst->tile, src0.tile, src1.tile);
}

View File

@ -197,4 +197,4 @@ static void __tile_2rpntlvwz1rst1(__tile1024i *dst0, __tile1024i *dst1,
#undef __DEFAULT_FN_ATTRS
#endif /* __x86_64__ */
#endif /* __AMX_MOVRS_TRANSPOSEINTRIN_H */
#endif /* __AMX_MOVRS_TRANSPOSEINTRIN_H */

View File

@ -8,7 +8,7 @@
*/
#ifndef __IMMINTRIN_H
#error \
"Never use <amxtf32tranposeintrin.h> directly; include <immintrin.h> instead."
"Never use <amxtf32transposeintrin.h> directly; include <immintrin.h> instead."
#endif // __IMMINTRIN_H
#ifndef __AMX_TF32TRANSPOSEINTRIN_H

16
lib/include/andes_vector.h vendored Normal file
View File

@ -0,0 +1,16 @@
//===----- andes_vector.h - Andes Vector definitions ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _ANDES_VECTOR_H_
#define _ANDES_VECTOR_H_
#include "riscv_vector.h"
#pragma clang riscv intrinsic andes_vector
#endif //_ANDES_VECTOR_H_

View File

@ -29,47 +29,16 @@ extern "C" {
/* 7 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 7.3 Memory barriers */
#if !__has_builtin(__dmb)
#define __dmb(i) __builtin_arm_dmb(i)
#endif
#if !__has_builtin(__dsb)
#define __dsb(i) __builtin_arm_dsb(i)
#endif
#if !__has_builtin(__isb)
#define __isb(i) __builtin_arm_isb(i)
#endif
void __dmb(unsigned int);
void __dsb(unsigned int);
void __isb(unsigned int);
/* 7.4 Hints */
#if !__has_builtin(__wfi)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
__builtin_arm_wfi();
}
#endif
#if !__has_builtin(__wfe)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
__builtin_arm_wfe();
}
#endif
#if !__has_builtin(__sev)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
__builtin_arm_sev();
}
#endif
#if !__has_builtin(__sevl)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
__builtin_arm_sevl();
}
#endif
#if !__has_builtin(__yield)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
__builtin_arm_yield();
}
#endif
void __wfi(void);
void __wfe(void);
void __sev(void);
void __sevl(void);
void __yield(void);
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
@ -872,8 +841,9 @@ __gcspopm() {
return __builtin_arm_gcspopm(0);
}
static __inline__ const void * __attribute__((__always_inline__, __nodebug__, target("gcs")))
__gcsss(const void *__stack) {
static __inline__ void *__attribute__((__always_inline__, __nodebug__,
target("gcs")))
__gcsss(void *__stack) {
return __builtin_arm_gcsss(__stack);
}
#endif

178
lib/include/arm_fp16.h vendored
View File

@ -34,408 +34,408 @@ typedef __fp16 float16_t;
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vabdh_f16(__s0, __s1)); \
__ret; \
})
#define vabsh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vabsh_f16(__s0)); \
__ret; \
})
#define vaddh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vaddh_f16(__s0, __s1)); \
__ret; \
})
#define vcageh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcageh_f16(__s0, __s1)); \
__ret; \
})
#define vcagth_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcagth_f16(__s0, __s1)); \
__ret; \
})
#define vcaleh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcaleh_f16(__s0, __s1)); \
__ret; \
})
#define vcalth_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcalth_f16(__s0, __s1)); \
__ret; \
})
#define vceqh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vceqh_f16(__s0, __s1)); \
__ret; \
})
#define vceqzh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vceqzh_f16(__s0)); \
__ret; \
})
#define vcgeh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcgeh_f16(__s0, __s1)); \
__ret; \
})
#define vcgezh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcgezh_f16(__s0)); \
__ret; \
})
#define vcgth_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcgth_f16(__s0, __s1)); \
__ret; \
})
#define vcgtzh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcgtzh_f16(__s0)); \
__ret; \
})
#define vcleh_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcleh_f16(__s0, __s1)); \
__ret; \
})
#define vclezh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vclezh_f16(__s0)); \
__ret; \
})
#define vclth_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (uint16_t) __builtin_neon_vclth_f16(__s0, __s1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vclth_f16(__s0, __s1)); \
__ret; \
})
#define vcltzh_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcltzh_f16(__s0)); \
__ret; \
})
#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvth_n_s16_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvth_n_s32_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvth_n_s64_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvth_n_u16_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvth_n_u32_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvth_n_u64_f16(__s0, __p1)); \
__ret; \
})
#define vcvth_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvth_s16_f16(__s0)); \
__ret; \
})
#define vcvth_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvth_s32_f16(__s0)); \
__ret; \
})
#define vcvth_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvth_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvth_s64_f16(__s0)); \
__ret; \
})
#define vcvth_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvth_u16_f16(__s0)); \
__ret; \
})
#define vcvth_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvth_u32_f16(__s0)); \
__ret; \
})
#define vcvth_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvth_u64_f16(__s0)); \
__ret; \
})
#define vcvtah_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvtah_s16_f16(__s0)); \
__ret; \
})
#define vcvtah_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtah_s32_f16(__s0)); \
__ret; \
})
#define vcvtah_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtah_s64_f16(__s0)); \
__ret; \
})
#define vcvtah_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvtah_u16_f16(__s0)); \
__ret; \
})
#define vcvtah_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtah_u32_f16(__s0)); \
__ret; \
})
#define vcvtah_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtah_u64_f16(__s0)); \
__ret; \
})
#define vcvth_f16_u16(__p0) __extension__ ({ \
float16_t __ret; \
uint16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_u16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_u16(__s0)); \
__ret; \
})
#define vcvth_f16_s16(__p0) __extension__ ({ \
float16_t __ret; \
int16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_s16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_s16(__s0)); \
__ret; \
})
#define vcvth_f16_u32(__p0) __extension__ ({ \
float16_t __ret; \
uint32_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_u32(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_u32(__s0)); \
__ret; \
})
#define vcvth_f16_s32(__p0) __extension__ ({ \
float16_t __ret; \
int32_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_s32(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_s32(__s0)); \
__ret; \
})
#define vcvth_f16_u64(__p0) __extension__ ({ \
float16_t __ret; \
uint64_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_u64(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_u64(__s0)); \
__ret; \
})
#define vcvth_f16_s64(__p0) __extension__ ({ \
float16_t __ret; \
int64_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_f16_s64(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_f16_s64(__s0)); \
__ret; \
})
#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
float16_t __ret; \
uint32_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_u32(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
float16_t __ret; \
int32_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_s32(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
float16_t __ret; \
uint64_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_u64(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
float16_t __ret; \
int64_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_s64(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
uint16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_u16(__s0, __p1)); \
__ret; \
})
#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
int16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vcvth_n_f16_s16(__s0, __p1)); \
__ret; \
})
#define vcvtmh_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvtmh_s16_f16(__s0)); \
__ret; \
})
#define vcvtmh_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtmh_s32_f16(__s0)); \
__ret; \
})
#define vcvtmh_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtmh_s64_f16(__s0)); \
__ret; \
})
#define vcvtmh_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvtmh_u16_f16(__s0)); \
__ret; \
})
#define vcvtmh_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtmh_u32_f16(__s0)); \
__ret; \
})
#define vcvtmh_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtmh_u64_f16(__s0)); \
__ret; \
})
#define vcvtnh_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvtnh_s16_f16(__s0)); \
__ret; \
})
#define vcvtnh_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtnh_s32_f16(__s0)); \
__ret; \
})
#define vcvtnh_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtnh_s64_f16(__s0)); \
__ret; \
})
#define vcvtnh_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvtnh_u16_f16(__s0)); \
__ret; \
})
#define vcvtnh_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtnh_u32_f16(__s0)); \
__ret; \
})
#define vcvtnh_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtnh_u64_f16(__s0)); \
__ret; \
})
#define vcvtph_s16_f16(__p0) __extension__ ({ \
int16_t __ret; \
float16_t __s0 = __p0; \
__ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \
__ret = __builtin_bit_cast(int16_t, __builtin_neon_vcvtph_s16_f16(__s0)); \
__ret; \
})
#define vcvtph_s32_f16(__p0) __extension__ ({ \
int32_t __ret; \
float16_t __s0 = __p0; \
__ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \
__ret = __builtin_bit_cast(int32_t, __builtin_neon_vcvtph_s32_f16(__s0)); \
__ret; \
})
#define vcvtph_s64_f16(__p0) __extension__ ({ \
int64_t __ret; \
float16_t __s0 = __p0; \
__ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \
__ret = __builtin_bit_cast(int64_t, __builtin_neon_vcvtph_s64_f16(__s0)); \
__ret; \
})
#define vcvtph_u16_f16(__p0) __extension__ ({ \
uint16_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \
__ret = __builtin_bit_cast(uint16_t, __builtin_neon_vcvtph_u16_f16(__s0)); \
__ret; \
})
#define vcvtph_u32_f16(__p0) __extension__ ({ \
uint32_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \
__ret = __builtin_bit_cast(uint32_t, __builtin_neon_vcvtph_u32_f16(__s0)); \
__ret; \
})
#define vcvtph_u64_f16(__p0) __extension__ ({ \
uint64_t __ret; \
float16_t __s0 = __p0; \
__ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \
__ret = __builtin_bit_cast(uint64_t, __builtin_neon_vcvtph_u64_f16(__s0)); \
__ret; \
})
#define vdivh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vdivh_f16(__s0, __s1)); \
__ret; \
})
#define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \
@ -443,7 +443,7 @@ typedef __fp16 float16_t;
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
float16_t __s2 = __p2; \
__ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmah_f16(__s0, __s1, __s2)); \
__ret; \
})
#define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \
@ -451,142 +451,142 @@ typedef __fp16 float16_t;
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
float16_t __s2 = __p2; \
__ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vfmsh_f16(__s0, __s1, __s2)); \
__ret; \
})
#define vmaxh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxh_f16(__s0, __s1)); \
__ret; \
})
#define vmaxnmh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmaxnmh_f16(__s0, __s1)); \
__ret; \
})
#define vminh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminh_f16(__s0, __s1)); \
__ret; \
})
#define vminnmh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vminnmh_f16(__s0, __s1)); \
__ret; \
})
#define vmulh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmulh_f16(__s0, __s1)); \
__ret; \
})
#define vmulxh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vmulxh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vmulxh_f16(__s0, __s1)); \
__ret; \
})
#define vnegh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vnegh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vnegh_f16(__s0)); \
__ret; \
})
#define vrecpeh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrecpeh_f16(__s0)); \
__ret; \
})
#define vrecpsh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrecpsh_f16(__s0, __s1)); \
__ret; \
})
#define vrecpxh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrecpxh_f16(__s0)); \
__ret; \
})
#define vrndh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndh_f16(__s0)); \
__ret; \
})
#define vrndah_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndah_f16(__s0)); \
__ret; \
})
#define vrndih_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndih_f16(__s0)); \
__ret; \
})
#define vrndmh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndmh_f16(__s0)); \
__ret; \
})
#define vrndnh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndnh_f16(__s0)); \
__ret; \
})
#define vrndph_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndph_f16(__s0)); \
__ret; \
})
#define vrndxh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrndxh_f16(__s0)); \
__ret; \
})
#define vrsqrteh_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrsqrteh_f16(__s0)); \
__ret; \
})
#define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vrsqrtsh_f16(__s0, __s1)); \
__ret; \
})
#define vsqrth_f16(__p0) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
__ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vsqrth_f16(__s0)); \
__ret; \
})
#define vsubh_f16(__p0, __p1) __extension__ ({ \
float16_t __ret; \
float16_t __s0 = __p0; \
float16_t __s1 = __p1; \
__ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \
__ret = __builtin_bit_cast(float16_t, __builtin_neon_vsubh_f16(__s0, __s1)); \
__ret; \
})
#endif

42776
lib/include/arm_neon.h vendored

File diff suppressed because it is too large Load Diff

694
lib/include/arm_sme.h vendored
View File

@ -146,6 +146,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s32
svint32_t svread_hor_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m)))
svint64_t svread_hor_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_mf8_m)))
svmfloat8_t svread_hor_za128_mf8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m)))
svint16_t svread_hor_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m)))
@ -172,6 +174,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m)
svuint8_t svread_hor_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m)))
svint8_t svread_hor_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_mf8_m)))
svmfloat8_t svread_hor_za8_mf8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m)))
svuint8_t svread_ver_za128_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m)))
@ -194,6 +198,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s32
svint32_t svread_ver_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m)))
svint64_t svread_ver_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_mf8_m)))
svmfloat8_t svread_ver_za128_mf8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m)))
svint16_t svread_ver_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m)))
@ -220,6 +226,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m)
svuint8_t svread_ver_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m)))
svint8_t svread_ver_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_mf8_m)))
svmfloat8_t svread_ver_za8_mf8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za128)))
void svst1_hor_vnum_za128(uint64_t, uint32_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za16)))
@ -294,6 +302,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s3
void svwrite_hor_za128_s32_m(uint64_t, uint32_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m)))
void svwrite_hor_za128_s64_m(uint64_t, uint32_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_mf8_m)))
void svwrite_hor_za128_mf8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m)))
void svwrite_hor_za128_s16_m(uint64_t, uint32_t, svbool_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m)))
@ -320,6 +330,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_m
void svwrite_hor_za8_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m)))
void svwrite_hor_za8_s8_m(uint64_t, uint32_t, svbool_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_m)))
void svwrite_hor_za8_mf8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m)))
void svwrite_ver_za128_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m)))
@ -342,6 +354,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s3
void svwrite_ver_za128_s32_m(uint64_t, uint32_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m)))
void svwrite_ver_za128_s64_m(uint64_t, uint32_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_mf8_m)))
void svwrite_ver_za128_mf8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m)))
void svwrite_ver_za128_s16_m(uint64_t, uint32_t, svbool_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m)))
@ -368,6 +382,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_m
void svwrite_ver_za8_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m)))
void svwrite_ver_za8_s8_m(uint64_t, uint32_t, svbool_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_m)))
void svwrite_ver_za8_mf8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_mask_za)))
void svzero_mask_za(uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za)))
@ -422,6 +438,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s3
svint32_t svread_hor_za128_m(svint32_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m)))
svint64_t svread_hor_za128_m(svint64_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_mf8_m)))
svmfloat8_t svread_hor_za128_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m)))
svint16_t svread_hor_za128_m(svint16_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m)))
@ -448,6 +466,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m
svuint8_t svread_hor_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m)))
svint8_t svread_hor_za8_m(svint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_mf8_m)))
svmfloat8_t svread_hor_za8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m)))
svuint8_t svread_ver_za128_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m)))
@ -470,6 +490,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s3
svint32_t svread_ver_za128_m(svint32_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m)))
svint64_t svread_ver_za128_m(svint64_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_mf8_m)))
svmfloat8_t svread_ver_za128_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m)))
svint16_t svread_ver_za128_m(svint16_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m)))
@ -496,6 +518,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m
svuint8_t svread_ver_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m)))
svint8_t svread_ver_za8_m(svint8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_mf8_m)))
svmfloat8_t svread_ver_za8_m(svmfloat8_t, svbool_t, uint64_t, uint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za32_s8_m)))
void svsumopa_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za32_s8_m)))
@ -526,6 +550,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s
void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m)))
void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_mf8_m)))
void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m)))
void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m)))
@ -552,6 +578,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_
void svwrite_hor_za8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m)))
void svwrite_hor_za8_m(uint64_t, uint32_t, svbool_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_m)))
void svwrite_hor_za8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m)))
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m)))
@ -574,6 +602,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m)))
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_mf8_m)))
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m)))
void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m)))
@ -600,6 +630,8 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_
void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m)))
void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_m)))
void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svmfloat8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x2)))
void svadd_za16_f16_vg1x2(uint32_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x4)))
@ -1158,6 +1190,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f16)
svfloat16_t svluti2_lane_zt_f16(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s32)))
svint32_t svluti2_lane_zt_s32(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_mf8)))
svmfloat8_t svluti2_lane_zt_mf8(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s16)))
svint16_t svluti2_lane_zt_s16(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u8_x2)))
@ -1176,6 +1210,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f16_
svfloat16x2_t svluti2_lane_zt_f16_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s32_x2)))
svint32x2_t svluti2_lane_zt_s32_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_mf8_x2)))
svmfloat8x2_t svluti2_lane_zt_mf8_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s16_x2)))
svint16x2_t svluti2_lane_zt_s16_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u8_x4)))
@ -1194,6 +1230,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f16_
svfloat16x4_t svluti2_lane_zt_f16_x4(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s32_x4)))
svint32x4_t svluti2_lane_zt_s32_x4(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_mf8_x4)))
svmfloat8x4_t svluti2_lane_zt_mf8_x4(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s16_x4)))
svint16x4_t svluti2_lane_zt_s16_x4(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u8)))
@ -1212,6 +1250,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f16)
svfloat16_t svluti4_lane_zt_f16(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s32)))
svint32_t svluti4_lane_zt_s32(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_mf8)))
svmfloat8_t svluti4_lane_zt_mf8(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s16)))
svint16_t svluti4_lane_zt_s16(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u8_x2)))
@ -1230,6 +1270,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f16_
svfloat16x2_t svluti4_lane_zt_f16_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s32_x2)))
svint32x2_t svluti4_lane_zt_s32_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_mf8_x2)))
svmfloat8x2_t svluti4_lane_zt_mf8_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s16_x2)))
svint16x2_t svluti4_lane_zt_s16_x2(uint64_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u32_x4)))
@ -1514,10 +1556,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_vg
svuint8x2_t svread_hor_za8_u8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_vg2)))
svint8x2_t svread_hor_za8_s8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_mf8_vg2)))
svmfloat8x2_t svread_hor_za8_mf8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_vg4)))
svuint8x4_t svread_hor_za8_u8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_vg4)))
svint8x4_t svread_hor_za8_s8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_mf8_vg4)))
svmfloat8x4_t svread_hor_za8_mf8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_vg2)))
svuint16x2_t svread_ver_za16_u16_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_vg2)))
@ -1562,10 +1608,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_vg
svuint8x2_t svread_ver_za8_u8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_vg2)))
svint8x2_t svread_ver_za8_s8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_mf8_vg2)))
svmfloat8x2_t svread_ver_za8_mf8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_vg4)))
svuint8x4_t svread_ver_za8_u8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_vg4)))
svint8x4_t svread_ver_za8_s8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_mf8_vg4)))
svmfloat8x4_t svread_ver_za8_mf8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_u16_vg1x2)))
svuint16x2_t svread_za16_u16_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_bf16_vg1x2)))
@ -1610,10 +1660,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_u8_vg1x2)
svuint8x2_t svread_za8_u8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_s8_vg1x2)))
svint8x2_t svread_za8_s8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_mf8_vg1x2)))
svmfloat8x2_t svread_za8_mf8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_u8_vg1x4)))
svuint8x4_t svread_za8_u8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_s8_vg1x4)))
svint8x4_t svread_za8_s8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_mf8_vg1x4)))
svmfloat8x4_t svread_za8_mf8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svstr_zt)))
void svstr_zt(uint64_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_u32_vg1x2)))
@ -1760,10 +1814,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_v
void svwrite_hor_za8_u8_vg2(uint64_t, uint32_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg2)))
void svwrite_hor_za8_s8_vg2(uint64_t, uint32_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_vg2)))
void svwrite_hor_za8_mf8_vg2(uint64_t, uint32_t, svmfloat8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_vg4)))
void svwrite_hor_za8_u8_vg4(uint64_t, uint32_t, svuint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg4)))
void svwrite_hor_za8_s8_vg4(uint64_t, uint32_t, svint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_vg4)))
void svwrite_hor_za8_mf8_vg4(uint64_t, uint32_t, svmfloat8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_vg2)))
void svwrite_ver_za16_u16_vg2(uint64_t, uint32_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_vg2)))
@ -1808,10 +1866,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_v
void svwrite_ver_za8_u8_vg2(uint64_t, uint32_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg2)))
void svwrite_ver_za8_s8_vg2(uint64_t, uint32_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_vg2)))
void svwrite_ver_za8_mf8_vg2(uint64_t, uint32_t, svmfloat8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_vg4)))
void svwrite_ver_za8_u8_vg4(uint64_t, uint32_t, svuint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg4)))
void svwrite_ver_za8_s8_vg4(uint64_t, uint32_t, svint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_vg4)))
void svwrite_ver_za8_mf8_vg4(uint64_t, uint32_t, svmfloat8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_u16_vg1x2)))
void svwrite_za16_u16_vg1x2(uint32_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_bf16_vg1x2)))
@ -1856,10 +1918,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x2
void svwrite_za8_u8_vg1x2(uint32_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x2)))
void svwrite_za8_s8_vg1x2(uint32_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_mf8_vg1x2)))
void svwrite_za8_mf8_vg1x2(uint32_t, svmfloat8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x4)))
void svwrite_za8_u8_vg1x4(uint32_t, svuint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x4)))
void svwrite_za8_s8_vg1x4(uint32_t, svint8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_mf8_vg1x4)))
void svwrite_za8_mf8_vg1x4(uint32_t, svmfloat8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_zt)))
void svzero_zt(uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_u32_vg1x2)))
@ -2338,10 +2404,14 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_
void svwrite_hor_za8_vg2(uint64_t, uint32_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg2)))
void svwrite_hor_za8_vg2(uint64_t, uint32_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_vg2)))
void svwrite_hor_za8_vg2(uint64_t, uint32_t, svmfloat8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_vg4)))
void svwrite_hor_za8_vg4(uint64_t, uint32_t, svuint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg4)))
void svwrite_hor_za8_vg4(uint64_t, uint32_t, svint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_mf8_vg4)))
void svwrite_hor_za8_vg4(uint64_t, uint32_t, svmfloat8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_vg2)))
void svwrite_ver_za16_vg2(uint64_t, uint32_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_vg2)))
@ -2386,10 +2456,14 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_
void svwrite_ver_za8_vg2(uint64_t, uint32_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg2)))
void svwrite_ver_za8_vg2(uint64_t, uint32_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_vg2)))
void svwrite_ver_za8_vg2(uint64_t, uint32_t, svmfloat8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_vg4)))
void svwrite_ver_za8_vg4(uint64_t, uint32_t, svuint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg4)))
void svwrite_ver_za8_vg4(uint64_t, uint32_t, svint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_mf8_vg4)))
void svwrite_ver_za8_vg4(uint64_t, uint32_t, svmfloat8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_u16_vg1x2)))
void svwrite_za16_vg1x2(uint32_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_bf16_vg1x2)))
@ -2434,10 +2508,14 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x
void svwrite_za8_vg1x2(uint32_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x2)))
void svwrite_za8_vg1x2(uint32_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_mf8_vg1x2)))
void svwrite_za8_vg1x2(uint32_t, svmfloat8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x4)))
void svwrite_za8_vg1x4(uint32_t, svuint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x4)))
void svwrite_za8_vg1x4(uint32_t, svint8x4_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_mf8_vg1x4)))
void svwrite_za8_vg1x4(uint32_t, svmfloat8x4_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x2)))
void svadd_za64_f64_vg1x2(uint32_t, svfloat64x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x4)))
@ -2782,6 +2860,602 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_s1
void svvdot_lane_za64_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_u16_vg1x4)))
void svvdot_lane_za64_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s8_u8)))
void svmop4a_1x1_za32_s8_u8(uint64_t, svint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u8_s8)))
void svmop4a_1x1_za32_u8_s8(uint64_t, svuint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_bf16_bf16)))
void svmop4a_1x1_za32_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_f16_f16)))
void svmop4a_1x1_za32_f16_f16(uint64_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_f32_f32)))
void svmop4a_1x1_za32_f32_f32(uint64_t, svfloat32_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s8_s8)))
void svmop4a_1x1_za32_s8_s8(uint64_t, svint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s16_s16)))
void svmop4a_1x1_za32_s16_s16(uint64_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u8_u8)))
void svmop4a_1x1_za32_u8_u8(uint64_t, svuint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u16_u16)))
void svmop4a_1x1_za32_u16_u16(uint64_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s8_u8)))
void svmop4a_1x2_za32_s8_u8(uint64_t, svint8_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u8_s8)))
void svmop4a_1x2_za32_u8_s8(uint64_t, svuint8_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_bf16_bf16)))
void svmop4a_1x2_za32_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_f16_f16)))
void svmop4a_1x2_za32_f16_f16(uint64_t, svfloat16_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_f32_f32)))
void svmop4a_1x2_za32_f32_f32(uint64_t, svfloat32_t, svfloat32x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s8_s8)))
void svmop4a_1x2_za32_s8_s8(uint64_t, svint8_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s16_s16)))
void svmop4a_1x2_za32_s16_s16(uint64_t, svint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u8_u8)))
void svmop4a_1x2_za32_u8_u8(uint64_t, svuint8_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u16_u16)))
void svmop4a_1x2_za32_u16_u16(uint64_t, svuint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s8_u8)))
void svmop4a_2x1_za32_s8_u8(uint64_t, svint8x2_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u8_s8)))
void svmop4a_2x1_za32_u8_s8(uint64_t, svuint8x2_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_bf16_bf16)))
void svmop4a_2x1_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_f16_f16)))
void svmop4a_2x1_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_f32_f32)))
void svmop4a_2x1_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s8_s8)))
void svmop4a_2x1_za32_s8_s8(uint64_t, svint8x2_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s16_s16)))
void svmop4a_2x1_za32_s16_s16(uint64_t, svint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u8_u8)))
void svmop4a_2x1_za32_u8_u8(uint64_t, svuint8x2_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u16_u16)))
void svmop4a_2x1_za32_u16_u16(uint64_t, svuint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s8_u8)))
void svmop4a_2x2_za32_s8_u8(uint64_t, svint8x2_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u8_s8)))
void svmop4a_2x2_za32_u8_s8(uint64_t, svuint8x2_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_bf16_bf16)))
void svmop4a_2x2_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_f16_f16)))
void svmop4a_2x2_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_f32_f32)))
void svmop4a_2x2_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s8_s8)))
void svmop4a_2x2_za32_s8_s8(uint64_t, svint8x2_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s16_s16)))
void svmop4a_2x2_za32_s16_s16(uint64_t, svint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u8_u8)))
void svmop4a_2x2_za32_u8_u8(uint64_t, svuint8x2_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u16_u16)))
void svmop4a_2x2_za32_u16_u16(uint64_t, svuint16x2_t, svuint16x2_t);
// NOTE(review): this file appears to be compiler-generated (Clang SME intrinsic
// header output) — declarations only, no bodies. Each `__ai`/`__aio` declaration
// aliases a `__builtin_sme_*` builtin via __clang_arm_builtin_alias; `__ai`
// entries carry the full type-suffixed name, `__aio` entries are the
// type-overloaded short names that share one identifier and are resolved by
// argument types. Do not hand-edit names or signatures; regenerate instead.
// Parameter shapes: first uint64_t is presumably the ZA tile selector, the next
// two operands are single vectors (1x1), vector+pair (1x2), pair+vector (2x1),
// or pair+pair (2x2); fpm_t variants take a trailing floating-point-mode
// argument — TODO confirm against the ACLE SME2 specification.

// svmop4s (subtracting MOP4) into ZA32: explicit 1x1/1x2/2x1/2x2 names for
// every signed/unsigned 8- and 16-bit integer mix plus bf16/f16/f32.
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s8_u8)))
void svmop4s_1x1_za32_s8_u8(uint64_t, svint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u8_s8)))
void svmop4s_1x1_za32_u8_s8(uint64_t, svuint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_bf16_bf16)))
void svmop4s_1x1_za32_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_f16_f16)))
void svmop4s_1x1_za32_f16_f16(uint64_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_f32_f32)))
void svmop4s_1x1_za32_f32_f32(uint64_t, svfloat32_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s8_s8)))
void svmop4s_1x1_za32_s8_s8(uint64_t, svint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s16_s16)))
void svmop4s_1x1_za32_s16_s16(uint64_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u8_u8)))
void svmop4s_1x1_za32_u8_u8(uint64_t, svuint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u16_u16)))
void svmop4s_1x1_za32_u16_u16(uint64_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s8_u8)))
void svmop4s_1x2_za32_s8_u8(uint64_t, svint8_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u8_s8)))
void svmop4s_1x2_za32_u8_s8(uint64_t, svuint8_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_bf16_bf16)))
void svmop4s_1x2_za32_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_f16_f16)))
void svmop4s_1x2_za32_f16_f16(uint64_t, svfloat16_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_f32_f32)))
void svmop4s_1x2_za32_f32_f32(uint64_t, svfloat32_t, svfloat32x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s8_s8)))
void svmop4s_1x2_za32_s8_s8(uint64_t, svint8_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s16_s16)))
void svmop4s_1x2_za32_s16_s16(uint64_t, svint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u8_u8)))
void svmop4s_1x2_za32_u8_u8(uint64_t, svuint8_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u16_u16)))
void svmop4s_1x2_za32_u16_u16(uint64_t, svuint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s8_u8)))
void svmop4s_2x1_za32_s8_u8(uint64_t, svint8x2_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u8_s8)))
void svmop4s_2x1_za32_u8_s8(uint64_t, svuint8x2_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_bf16_bf16)))
void svmop4s_2x1_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_f16_f16)))
void svmop4s_2x1_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_f32_f32)))
void svmop4s_2x1_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s8_s8)))
void svmop4s_2x1_za32_s8_s8(uint64_t, svint8x2_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s16_s16)))
void svmop4s_2x1_za32_s16_s16(uint64_t, svint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u8_u8)))
void svmop4s_2x1_za32_u8_u8(uint64_t, svuint8x2_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u16_u16)))
void svmop4s_2x1_za32_u16_u16(uint64_t, svuint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s8_u8)))
void svmop4s_2x2_za32_s8_u8(uint64_t, svint8x2_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u8_s8)))
void svmop4s_2x2_za32_u8_s8(uint64_t, svuint8x2_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_bf16_bf16)))
void svmop4s_2x2_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_f16_f16)))
void svmop4s_2x2_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_f32_f32)))
void svmop4s_2x2_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s8_s8)))
void svmop4s_2x2_za32_s8_s8(uint64_t, svint8x2_t, svint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s16_s16)))
void svmop4s_2x2_za32_s16_s16(uint64_t, svint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u8_u8)))
void svmop4s_2x2_za32_u8_u8(uint64_t, svuint8x2_t, svuint8x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u16_u16)))
void svmop4s_2x2_za32_u16_u16(uint64_t, svuint16x2_t, svuint16x2_t);
// Overloaded short name svmop4a_za32: one identifier, resolved to the matching
// __builtin_sme_svmop4a_* builtin by the operand vector types.
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s8_u8)))
void svmop4a_za32(uint64_t, svint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u8_s8)))
void svmop4a_za32(uint64_t, svuint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_bf16_bf16)))
void svmop4a_za32(uint64_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_f16_f16)))
void svmop4a_za32(uint64_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_f32_f32)))
void svmop4a_za32(uint64_t, svfloat32_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s8_s8)))
void svmop4a_za32(uint64_t, svint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_s16_s16)))
void svmop4a_za32(uint64_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u8_u8)))
void svmop4a_za32(uint64_t, svuint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_u16_u16)))
void svmop4a_za32(uint64_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s8_u8)))
void svmop4a_za32(uint64_t, svint8_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u8_s8)))
void svmop4a_za32(uint64_t, svuint8_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_bf16_bf16)))
void svmop4a_za32(uint64_t, svbfloat16_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_f16_f16)))
void svmop4a_za32(uint64_t, svfloat16_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_f32_f32)))
void svmop4a_za32(uint64_t, svfloat32_t, svfloat32x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s8_s8)))
void svmop4a_za32(uint64_t, svint8_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_s16_s16)))
void svmop4a_za32(uint64_t, svint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u8_u8)))
void svmop4a_za32(uint64_t, svuint8_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_u16_u16)))
void svmop4a_za32(uint64_t, svuint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s8_u8)))
void svmop4a_za32(uint64_t, svint8x2_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u8_s8)))
void svmop4a_za32(uint64_t, svuint8x2_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_bf16_bf16)))
void svmop4a_za32(uint64_t, svbfloat16x2_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_f16_f16)))
void svmop4a_za32(uint64_t, svfloat16x2_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_f32_f32)))
void svmop4a_za32(uint64_t, svfloat32x2_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s8_s8)))
void svmop4a_za32(uint64_t, svint8x2_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_s16_s16)))
void svmop4a_za32(uint64_t, svint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u8_u8)))
void svmop4a_za32(uint64_t, svuint8x2_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_u16_u16)))
void svmop4a_za32(uint64_t, svuint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s8_u8)))
void svmop4a_za32(uint64_t, svint8x2_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u8_s8)))
void svmop4a_za32(uint64_t, svuint8x2_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_bf16_bf16)))
void svmop4a_za32(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_f16_f16)))
void svmop4a_za32(uint64_t, svfloat16x2_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_f32_f32)))
void svmop4a_za32(uint64_t, svfloat32x2_t, svfloat32x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s8_s8)))
void svmop4a_za32(uint64_t, svint8x2_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_s16_s16)))
void svmop4a_za32(uint64_t, svint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u8_u8)))
void svmop4a_za32(uint64_t, svuint8x2_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_u16_u16)))
void svmop4a_za32(uint64_t, svuint16x2_t, svuint16x2_t);
// Overloaded short name svmop4s_za32 (subtracting counterpart of the above).
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s8_u8)))
void svmop4s_za32(uint64_t, svint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u8_s8)))
void svmop4s_za32(uint64_t, svuint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_bf16_bf16)))
void svmop4s_za32(uint64_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_f16_f16)))
void svmop4s_za32(uint64_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_f32_f32)))
void svmop4s_za32(uint64_t, svfloat32_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s8_s8)))
void svmop4s_za32(uint64_t, svint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_s16_s16)))
void svmop4s_za32(uint64_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u8_u8)))
void svmop4s_za32(uint64_t, svuint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za32_u16_u16)))
void svmop4s_za32(uint64_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s8_u8)))
void svmop4s_za32(uint64_t, svint8_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u8_s8)))
void svmop4s_za32(uint64_t, svuint8_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_bf16_bf16)))
void svmop4s_za32(uint64_t, svbfloat16_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_f16_f16)))
void svmop4s_za32(uint64_t, svfloat16_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_f32_f32)))
void svmop4s_za32(uint64_t, svfloat32_t, svfloat32x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s8_s8)))
void svmop4s_za32(uint64_t, svint8_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_s16_s16)))
void svmop4s_za32(uint64_t, svint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u8_u8)))
void svmop4s_za32(uint64_t, svuint8_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za32_u16_u16)))
void svmop4s_za32(uint64_t, svuint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s8_u8)))
void svmop4s_za32(uint64_t, svint8x2_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u8_s8)))
void svmop4s_za32(uint64_t, svuint8x2_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_bf16_bf16)))
void svmop4s_za32(uint64_t, svbfloat16x2_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_f16_f16)))
void svmop4s_za32(uint64_t, svfloat16x2_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_f32_f32)))
void svmop4s_za32(uint64_t, svfloat32x2_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s8_s8)))
void svmop4s_za32(uint64_t, svint8x2_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_s16_s16)))
void svmop4s_za32(uint64_t, svint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u8_u8)))
void svmop4s_za32(uint64_t, svuint8x2_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za32_u16_u16)))
void svmop4s_za32(uint64_t, svuint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s8_u8)))
void svmop4s_za32(uint64_t, svint8x2_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u8_s8)))
void svmop4s_za32(uint64_t, svuint8x2_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_bf16_bf16)))
void svmop4s_za32(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_f16_f16)))
void svmop4s_za32(uint64_t, svfloat16x2_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_f32_f32)))
void svmop4s_za32(uint64_t, svfloat32x2_t, svfloat32x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s8_s8)))
void svmop4s_za32(uint64_t, svint8x2_t, svint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_s16_s16)))
void svmop4s_za32(uint64_t, svint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u8_u8)))
void svmop4s_za32(uint64_t, svuint8x2_t, svuint8x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za32_u16_u16)))
void svmop4s_za32(uint64_t, svuint16x2_t, svuint16x2_t);
// ZA16 bf16 variants: explicit names, then svmop4a_za16/svmop4s_za16 overloads.
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_bf16_bf16)))
void svmop4a_1x1_za16_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_bf16_bf16)))
void svmop4a_1x2_za16_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_bf16_bf16)))
void svmop4a_2x1_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_bf16_bf16)))
void svmop4a_2x2_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za16_bf16_bf16)))
void svmop4s_1x1_za16_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za16_bf16_bf16)))
void svmop4s_1x2_za16_bf16_bf16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za16_bf16_bf16)))
void svmop4s_2x1_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za16_bf16_bf16)))
void svmop4s_2x2_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_bf16_bf16)))
void svmop4a_za16(uint64_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_bf16_bf16)))
void svmop4a_za16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_bf16_bf16)))
void svmop4a_za16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_bf16_bf16)))
void svmop4a_za16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za16_bf16_bf16)))
void svmop4s_za16(uint64_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za16_bf16_bf16)))
void svmop4s_za16(uint64_t, svbfloat16_t, svbfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za16_bf16_bf16)))
void svmop4s_za16(uint64_t, svbfloat16x2_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za16_bf16_bf16)))
void svmop4s_za16(uint64_t, svbfloat16x2_t, svbfloat16x2_t);
// ZA16 f16 variants: explicit names, then za16 overloads.
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_f16_f16)))
void svmop4a_1x1_za16_f16_f16(uint64_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_f16_f16)))
void svmop4a_1x2_za16_f16_f16(uint64_t, svfloat16_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_f16_f16)))
void svmop4a_2x1_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_f16_f16)))
void svmop4a_2x2_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za16_f16_f16)))
void svmop4s_1x1_za16_f16_f16(uint64_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za16_f16_f16)))
void svmop4s_1x2_za16_f16_f16(uint64_t, svfloat16_t, svfloat16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za16_f16_f16)))
void svmop4s_2x1_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za16_f16_f16)))
void svmop4s_2x2_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_f16_f16)))
void svmop4a_za16(uint64_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_f16_f16)))
void svmop4a_za16(uint64_t, svfloat16_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_f16_f16)))
void svmop4a_za16(uint64_t, svfloat16x2_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_f16_f16)))
void svmop4a_za16(uint64_t, svfloat16x2_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za16_f16_f16)))
void svmop4s_za16(uint64_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za16_f16_f16)))
void svmop4s_za16(uint64_t, svfloat16_t, svfloat16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za16_f16_f16)))
void svmop4s_za16(uint64_t, svfloat16x2_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za16_f16_f16)))
void svmop4s_za16(uint64_t, svfloat16x2_t, svfloat16x2_t);
// ZA64 f64 variants: explicit names, then za64 overloads.
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_f64_f64)))
void svmop4a_1x1_za64_f64_f64(uint64_t, svfloat64_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_f64_f64)))
void svmop4a_1x2_za64_f64_f64(uint64_t, svfloat64_t, svfloat64x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_f64_f64)))
void svmop4a_2x1_za64_f64_f64(uint64_t, svfloat64x2_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_f64_f64)))
void svmop4a_2x2_za64_f64_f64(uint64_t, svfloat64x2_t, svfloat64x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_f64_f64)))
void svmop4s_1x1_za64_f64_f64(uint64_t, svfloat64_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_f64_f64)))
void svmop4s_1x2_za64_f64_f64(uint64_t, svfloat64_t, svfloat64x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_f64_f64)))
void svmop4s_2x1_za64_f64_f64(uint64_t, svfloat64x2_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_f64_f64)))
void svmop4s_2x2_za64_f64_f64(uint64_t, svfloat64x2_t, svfloat64x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_f64_f64)))
void svmop4a_za64(uint64_t, svfloat64_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_f64_f64)))
void svmop4a_za64(uint64_t, svfloat64_t, svfloat64x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_f64_f64)))
void svmop4a_za64(uint64_t, svfloat64x2_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_f64_f64)))
void svmop4a_za64(uint64_t, svfloat64x2_t, svfloat64x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_f64_f64)))
void svmop4s_za64(uint64_t, svfloat64_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_f64_f64)))
void svmop4s_za64(uint64_t, svfloat64_t, svfloat64x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_f64_f64)))
void svmop4s_za64(uint64_t, svfloat64x2_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_f64_f64)))
void svmop4s_za64(uint64_t, svfloat64x2_t, svfloat64x2_t);
// FP8 (mf8) variants into ZA16: trailing fpm_t argument; accumulate-only here.
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_mf8_mf8_fpm)))
void svmop4a_1x1_za16_mf8_mf8_fpm(uint64_t, svmfloat8_t, svmfloat8_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_mf8_mf8_fpm)))
void svmop4a_1x2_za16_mf8_mf8_fpm(uint64_t, svmfloat8_t, svmfloat8x2_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_mf8_mf8_fpm)))
void svmop4a_2x1_za16_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_mf8_mf8_fpm)))
void svmop4a_2x2_za16_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8x2_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za16_mf8_mf8_fpm)))
void svmop4a_za16_fpm(uint64_t, svmfloat8_t, svmfloat8_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za16_mf8_mf8_fpm)))
void svmop4a_za16_fpm(uint64_t, svmfloat8_t, svmfloat8x2_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za16_mf8_mf8_fpm)))
void svmop4a_za16_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za16_mf8_mf8_fpm)))
void svmop4a_za16_fpm(uint64_t, svmfloat8x2_t, svmfloat8x2_t, fpm_t);
// FP8 (mf8) variants into ZA32: trailing fpm_t argument; accumulate-only here.
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_mf8_mf8_fpm)))
void svmop4a_1x1_za32_mf8_mf8_fpm(uint64_t, svmfloat8_t, svmfloat8_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_mf8_mf8_fpm)))
void svmop4a_1x2_za32_mf8_mf8_fpm(uint64_t, svmfloat8_t, svmfloat8x2_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_mf8_mf8_fpm)))
void svmop4a_2x1_za32_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_mf8_mf8_fpm)))
void svmop4a_2x2_za32_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8x2_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za32_mf8_mf8_fpm)))
void svmop4a_za32_fpm(uint64_t, svmfloat8_t, svmfloat8_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za32_mf8_mf8_fpm)))
void svmop4a_za32_fpm(uint64_t, svmfloat8_t, svmfloat8x2_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za32_mf8_mf8_fpm)))
void svmop4a_za32_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za32_mf8_mf8_fpm)))
void svmop4a_za32_fpm(uint64_t, svmfloat8x2_t, svmfloat8x2_t, fpm_t);
// ZA64 16-bit integer variants: explicit mop4a names, then mop4s names.
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_s16_u16)))
void svmop4a_1x1_za64_s16_u16(uint64_t, svint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_u16_s16)))
void svmop4a_1x1_za64_u16_s16(uint64_t, svuint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_s16_s16)))
void svmop4a_1x1_za64_s16_s16(uint64_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_u16_u16)))
void svmop4a_1x1_za64_u16_u16(uint64_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_s16_u16)))
void svmop4a_1x2_za64_s16_u16(uint64_t, svint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_u16_s16)))
void svmop4a_1x2_za64_u16_s16(uint64_t, svuint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_s16_s16)))
void svmop4a_1x2_za64_s16_s16(uint64_t, svint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_u16_u16)))
void svmop4a_1x2_za64_u16_u16(uint64_t, svuint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_s16_u16)))
void svmop4a_2x1_za64_s16_u16(uint64_t, svint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_u16_s16)))
void svmop4a_2x1_za64_u16_s16(uint64_t, svuint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_s16_s16)))
void svmop4a_2x1_za64_s16_s16(uint64_t, svint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_u16_u16)))
void svmop4a_2x1_za64_u16_u16(uint64_t, svuint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_s16_u16)))
void svmop4a_2x2_za64_s16_u16(uint64_t, svint16x2_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_u16_s16)))
void svmop4a_2x2_za64_u16_s16(uint64_t, svuint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_s16_s16)))
void svmop4a_2x2_za64_s16_s16(uint64_t, svint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_u16_u16)))
void svmop4a_2x2_za64_u16_u16(uint64_t, svuint16x2_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_s16_u16)))
void svmop4s_1x1_za64_s16_u16(uint64_t, svint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_u16_s16)))
void svmop4s_1x1_za64_u16_s16(uint64_t, svuint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_s16_s16)))
void svmop4s_1x1_za64_s16_s16(uint64_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_u16_u16)))
void svmop4s_1x1_za64_u16_u16(uint64_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_s16_u16)))
void svmop4s_1x2_za64_s16_u16(uint64_t, svint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_u16_s16)))
void svmop4s_1x2_za64_u16_s16(uint64_t, svuint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_s16_s16)))
void svmop4s_1x2_za64_s16_s16(uint64_t, svint16_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_u16_u16)))
void svmop4s_1x2_za64_u16_u16(uint64_t, svuint16_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_s16_u16)))
void svmop4s_2x1_za64_s16_u16(uint64_t, svint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_u16_s16)))
void svmop4s_2x1_za64_u16_s16(uint64_t, svuint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_s16_s16)))
void svmop4s_2x1_za64_s16_s16(uint64_t, svint16x2_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_u16_u16)))
void svmop4s_2x1_za64_u16_u16(uint64_t, svuint16x2_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_s16_u16)))
void svmop4s_2x2_za64_s16_u16(uint64_t, svint16x2_t, svuint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_u16_s16)))
void svmop4s_2x2_za64_u16_s16(uint64_t, svuint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_s16_s16)))
void svmop4s_2x2_za64_s16_s16(uint64_t, svint16x2_t, svint16x2_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_u16_u16)))
void svmop4s_2x2_za64_u16_u16(uint64_t, svuint16x2_t, svuint16x2_t);
// ZA64 16-bit integer overloads: svmop4a_za64, then svmop4s_za64.
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_s16_u16)))
void svmop4a_za64(uint64_t, svint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_u16_s16)))
void svmop4a_za64(uint64_t, svuint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_s16_s16)))
void svmop4a_za64(uint64_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x1_za64_u16_u16)))
void svmop4a_za64(uint64_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_s16_u16)))
void svmop4a_za64(uint64_t, svint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_u16_s16)))
void svmop4a_za64(uint64_t, svuint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_s16_s16)))
void svmop4a_za64(uint64_t, svint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_1x2_za64_u16_u16)))
void svmop4a_za64(uint64_t, svuint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_s16_u16)))
void svmop4a_za64(uint64_t, svint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_u16_s16)))
void svmop4a_za64(uint64_t, svuint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_s16_s16)))
void svmop4a_za64(uint64_t, svint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x1_za64_u16_u16)))
void svmop4a_za64(uint64_t, svuint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_s16_u16)))
void svmop4a_za64(uint64_t, svint16x2_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_u16_s16)))
void svmop4a_za64(uint64_t, svuint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_s16_s16)))
void svmop4a_za64(uint64_t, svint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4a_2x2_za64_u16_u16)))
void svmop4a_za64(uint64_t, svuint16x2_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_s16_u16)))
void svmop4s_za64(uint64_t, svint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_u16_s16)))
void svmop4s_za64(uint64_t, svuint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_s16_s16)))
void svmop4s_za64(uint64_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x1_za64_u16_u16)))
void svmop4s_za64(uint64_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_s16_u16)))
void svmop4s_za64(uint64_t, svint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_u16_s16)))
void svmop4s_za64(uint64_t, svuint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_s16_s16)))
void svmop4s_za64(uint64_t, svint16_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_1x2_za64_u16_u16)))
void svmop4s_za64(uint64_t, svuint16_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_s16_u16)))
void svmop4s_za64(uint64_t, svint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_u16_s16)))
void svmop4s_za64(uint64_t, svuint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_s16_s16)))
void svmop4s_za64(uint64_t, svint16x2_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x1_za64_u16_u16)))
void svmop4s_za64(uint64_t, svuint16x2_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_s16_u16)))
void svmop4s_za64(uint64_t, svint16x2_t, svuint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_u16_s16)))
void svmop4s_za64(uint64_t, svuint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_s16_s16)))
void svmop4s_za64(uint64_t, svint16x2_t, svint16x2_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmop4s_2x2_za64_u16_u16)))
void svmop4s_za64(uint64_t, svuint16x2_t, svuint16x2_t);
// svtmopa_lane: lane-indexed variant taking a vector pair, a vector, an
// svuint8_t and a trailing uint64_t lane index — group continues past this
// chunk; see the rest of the generated file.
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s8_u8)))
void svtmopa_lane_za32_s8_u8(uint64_t, svint8x2_t, svuint8_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u8_s8)))
void svtmopa_lane_za32_u8_s8(uint64_t, svuint8x2_t, svint8_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s8_s8)))
void svtmopa_lane_za32_s8_s8(uint64_t, svint8x2_t, svint8_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s16_s16)))
void svtmopa_lane_za32_s16_s16(uint64_t, svint16x2_t, svint16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_bf16_bf16)))
void svtmopa_lane_za32_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_f32_f32)))
void svtmopa_lane_za32_f32_f32(uint64_t, svfloat32x2_t, svfloat32_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_f16_f16)))
void svtmopa_lane_za32_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u8_u8)))
void svtmopa_lane_za32_u8_u8(uint64_t, svuint8x2_t, svuint8_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u16_u16)))
void svtmopa_lane_za32_u16_u16(uint64_t, svuint16x2_t, svuint16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s8_u8)))
void svtmopa_lane_za32(uint64_t, svint8x2_t, svuint8_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u8_s8)))
void svtmopa_lane_za32(uint64_t, svuint8x2_t, svint8_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s8_s8)))
void svtmopa_lane_za32(uint64_t, svint8x2_t, svint8_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_s16_s16)))
void svtmopa_lane_za32(uint64_t, svint16x2_t, svint16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_bf16_bf16)))
void svtmopa_lane_za32(uint64_t, svbfloat16x2_t, svbfloat16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_f32_f32)))
void svtmopa_lane_za32(uint64_t, svfloat32x2_t, svfloat32_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_f16_f16)))
void svtmopa_lane_za32(uint64_t, svfloat16x2_t, svfloat16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u8_u8)))
void svtmopa_lane_za32(uint64_t, svuint8x2_t, svuint8_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_u16_u16)))
void svtmopa_lane_za32(uint64_t, svuint16x2_t, svuint16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_bf16_bf16)))
void svtmopa_lane_za16_bf16_bf16(uint64_t, svbfloat16x2_t, svbfloat16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_bf16_bf16)))
void svtmopa_lane_za16(uint64_t, svbfloat16x2_t, svbfloat16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_f16_f16)))
void svtmopa_lane_za16_f16_f16(uint64_t, svfloat16x2_t, svfloat16_t, svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_f16_f16)))
void svtmopa_lane_za16(uint64_t, svfloat16x2_t, svfloat16_t, svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_mf8_mf8_fpm)))
void svtmopa_lane_za16_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, svuint8_t, uint64_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za16_mf8_mf8_fpm)))
void svtmopa_lane_za16_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, svuint8_t, uint64_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_mf8_mf8_fpm)))
void svtmopa_lane_za32_mf8_mf8_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, svuint8_t, uint64_t, fpm_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svtmopa_lane_za32_mf8_mf8_fpm)))
void svtmopa_lane_za32_fpm(uint64_t, svmfloat8x2_t, svmfloat8_t, svuint8_t, uint64_t, fpm_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u8)))
svuint8_t svreadz_hor_za128_u8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u32)))
@ -2804,6 +3478,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s3
svint32_t svreadz_hor_za128_s32(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s64)))
svint64_t svreadz_hor_za128_s64(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_mf8)))
svmfloat8_t svreadz_hor_za128_mf8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s16)))
svint16_t svreadz_hor_za128_s16(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16)))
@ -2870,14 +3546,20 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8))
svuint8_t svreadz_hor_za8_u8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8)))
svint8_t svreadz_hor_za8_s8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_mf8)))
svmfloat8_t svreadz_hor_za8_mf8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg2)))
svuint8x2_t svreadz_hor_za8_u8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg2)))
svint8x2_t svreadz_hor_za8_s8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_mf8_vg2)))
svmfloat8x2_t svreadz_hor_za8_mf8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg4)))
svuint8x4_t svreadz_hor_za8_u8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg4)))
svint8x4_t svreadz_hor_za8_s8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_mf8_vg4)))
svmfloat8x4_t svreadz_hor_za8_mf8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u8)))
svuint8_t svreadz_ver_za128_u8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u32)))
@ -2900,6 +3582,8 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s3
svint32_t svreadz_ver_za128_s32(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s64)))
svint64_t svreadz_ver_za128_s64(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_mf8)))
svmfloat8_t svreadz_ver_za128_mf8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s16)))
svint16_t svreadz_ver_za128_s16(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16)))
@ -2966,14 +3650,20 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8))
svuint8_t svreadz_ver_za8_u8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8)))
svint8_t svreadz_ver_za8_s8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_mf8)))
svmfloat8_t svreadz_ver_za8_mf8(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg2)))
svuint8x2_t svreadz_ver_za8_u8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg2)))
svint8x2_t svreadz_ver_za8_s8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_mf8_vg2)))
svmfloat8x2_t svreadz_ver_za8_mf8_vg2(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg4)))
svuint8x4_t svreadz_ver_za8_u8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg4)))
svint8x4_t svreadz_ver_za8_s8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_mf8_vg4)))
svmfloat8x4_t svreadz_ver_za8_mf8_vg4(uint64_t, uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_u16_vg1x2)))
svuint16x2_t svreadz_za16_u16_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_bf16_vg1x2)))
@ -3018,10 +3708,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x2
svuint8x2_t svreadz_za8_u8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x2)))
svint8x2_t svreadz_za8_s8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_mf8_vg1x2)))
svmfloat8x2_t svreadz_za8_mf8_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x4)))
svuint8x4_t svreadz_za8_u8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x4)))
svint8x4_t svreadz_za8_s8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_mf8_vg1x4)))
svmfloat8x4_t svreadz_za8_mf8_vg1x4(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x2)))
void svzero_za64_vg1x2(uint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x4)))

4540
lib/include/arm_sve.h vendored

File diff suppressed because it is too large Load Diff

View File

@ -441,8 +441,8 @@ _mm512_maskz_sqrt_pbh(__mmask32 __U, __m512bh __A) {
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
return (__m512bh)__builtin_ia32_vfmaddnepbh512((__v32bf)__A, (__v32bf)__B,
(__v32bf)__C);
return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
(__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
@ -469,8 +469,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fmadd_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
return (__m512bh)__builtin_ia32_vfmaddnepbh512((__v32bf)__A, (__v32bf)__B,
-(__v32bf)__C);
return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
-(__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
@ -497,8 +497,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fmsub_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fnmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
return (__m512bh)__builtin_ia32_vfmaddnepbh512((__v32bf)__A, -(__v32bf)__B,
(__v32bf)__C);
return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
(__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_mask_fnmadd_pbh(
@ -527,8 +527,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_maskz_fnmadd_pbh(
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_fnmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
return (__m512bh)__builtin_ia32_vfmaddnepbh512((__v32bf)__A, -(__v32bf)__B,
-(__v32bf)__C);
return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
-(__v32bf)__C);
}
static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_mask_fnmsub_pbh(

View File

@ -78,20 +78,20 @@ _mm512_maskz_cvtbiasph_bf8(__mmask32 __U, __m512i __A, __m512h __B) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtbiassph_bf8(__m512i __A, __m512h __B) {
_mm512_cvts_biasph_bf8(__m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2bf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)_mm256_undefined_si256(),
(__mmask32)-1);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtbiassph_bf8(
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvts_biasph_bf8(
__m256i __W, __mmask32 __U, __m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2bf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)__W, (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtbiassph_bf8(__mmask32 __U, __m512i __A, __m512h __B) {
_mm512_maskz_cvts_biasph_bf8(__mmask32 __U, __m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2bf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)_mm256_setzero_si256(),
(__mmask32)__U);
@ -118,20 +118,20 @@ _mm512_maskz_cvtbiasph_hf8(__mmask32 __U, __m512i __A, __m512h __B) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtbiassph_hf8(__m512i __A, __m512h __B) {
_mm512_cvts_biasph_hf8(__m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2hf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)_mm256_undefined_si256(),
(__mmask32)-1);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtbiassph_hf8(
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvts_biasph_hf8(
__m256i __W, __mmask32 __U, __m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2hf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)__W, (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtbiassph_hf8(__mmask32 __U, __m512i __A, __m512h __B) {
_mm512_maskz_cvts_biasph_hf8(__mmask32 __U, __m512i __A, __m512h __B) {
return (__m256i)__builtin_ia32_vcvtbiasph2hf8s_512_mask(
(__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)_mm256_setzero_si256(),
(__mmask32)__U);
@ -157,21 +157,21 @@ _mm512_maskz_cvt2ph_bf8(__mmask64 __U, __m512h __A, __m512h __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvts2ph_bf8(__m512h __A, __m512h __B) {
_mm512_cvts_2ph_bf8(__m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_vcvt2ph2bf8s_512((__v32hf)(__A),
(__v32hf)(__B));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvts2ph_bf8(__m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
_mm512_mask_cvts_2ph_bf8(__m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_selectb_512(
(__mmask64)__U, (__v64qi)_mm512_cvts2ph_bf8(__A, __B), (__v64qi)__W);
(__mmask64)__U, (__v64qi)_mm512_cvts_2ph_bf8(__A, __B), (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvts2ph_bf8(__mmask64 __U, __m512h __A, __m512h __B) {
_mm512_maskz_cvts_2ph_bf8(__mmask64 __U, __m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_selectb_512(
(__mmask64)__U, (__v64qi)_mm512_cvts2ph_bf8(__A, __B),
(__mmask64)__U, (__v64qi)_mm512_cvts_2ph_bf8(__A, __B),
(__v64qi)(__m512i)_mm512_setzero_si512());
}
@ -195,37 +195,37 @@ _mm512_maskz_cvt2ph_hf8(__mmask64 __U, __m512h __A, __m512h __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvts2ph_hf8(__m512h __A, __m512h __B) {
_mm512_cvts_2ph_hf8(__m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_vcvt2ph2hf8s_512((__v32hf)(__A),
(__v32hf)(__B));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvts2ph_hf8(__m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
_mm512_mask_cvts_2ph_hf8(__m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_selectb_512(
(__mmask64)__U, (__v64qi)_mm512_cvts2ph_hf8(__A, __B), (__v64qi)__W);
(__mmask64)__U, (__v64qi)_mm512_cvts_2ph_hf8(__A, __B), (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvts2ph_hf8(__mmask64 __U, __m512h __A, __m512h __B) {
_mm512_maskz_cvts_2ph_hf8(__mmask64 __U, __m512h __A, __m512h __B) {
return (__m512i)__builtin_ia32_selectb_512(
(__mmask64)__U, (__v64qi)_mm512_cvts2ph_hf8(__A, __B),
(__mmask64)__U, (__v64qi)_mm512_cvts_2ph_hf8(__A, __B),
(__v64qi)(__m512i)_mm512_setzero_si512());
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_cvthf8(__m256i __A) {
static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_cvthf8_ph(__m256i __A) {
return (__m512h)__builtin_ia32_vcvthf8_2ph512_mask(
(__v32qi)__A, (__v32hf)(__m512h)_mm512_undefined_ph(), (__mmask32)-1);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_mask_cvthf8(__m512h __W, __mmask32 __U, __m256i __A) {
_mm512_mask_cvthf8_ph(__m512h __W, __mmask32 __U, __m256i __A) {
return (__m512h)__builtin_ia32_vcvthf8_2ph512_mask(
(__v32qi)__A, (__v32hf)(__m512h)__W, (__mmask32)__U);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_maskz_cvthf8(__mmask32 __U, __m256i __A) {
_mm512_maskz_cvthf8_ph(__mmask32 __U, __m256i __A) {
return (__m512h)__builtin_ia32_vcvthf8_2ph512_mask(
(__v32qi)__A, (__v32hf)(__m512h)_mm512_setzero_ph(), (__mmask32)__U);
}
@ -247,19 +247,20 @@ _mm512_maskz_cvtph_bf8(__mmask32 __U, __m512h __A) {
(__v32hf)__A, (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_cvtsph_bf8(__m512h __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvts_ph_bf8(__m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2bf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)_mm256_undefined_si256(), (__mmask32)-1);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsph_bf8(__m256i __W, __mmask32 __U, __m512h __A) {
_mm512_mask_cvts_ph_bf8(__m256i __W, __mmask32 __U, __m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2bf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)__W, (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsph_bf8(__mmask32 __U, __m512h __A) {
_mm512_maskz_cvts_ph_bf8(__mmask32 __U, __m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2bf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)__U);
}
@ -281,19 +282,20 @@ _mm512_maskz_cvtph_hf8(__mmask32 __U, __m512h __A) {
(__v32hf)__A, (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_cvtsph_hf8(__m512h __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvts_ph_hf8(__m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2hf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)_mm256_undefined_si256(), (__mmask32)-1);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsph_hf8(__m256i __W, __mmask32 __U, __m512h __A) {
_mm512_mask_cvts_ph_hf8(__m256i __W, __mmask32 __U, __m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2hf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)__W, (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsph_hf8(__mmask32 __U, __m512h __A) {
_mm512_maskz_cvts_ph_hf8(__mmask32 __U, __m512h __A) {
return (__m256i)__builtin_ia32_vcvtph2hf8s_512_mask(
(__v32hf)__A, (__v32qi)(__m256i)_mm256_setzero_si256(), (__mmask32)__U);
}

View File

@ -20,20 +20,21 @@
__min_vector_width__(512)))
// 512 bit : Double -> Int
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvttspd_epi32(__m512d __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_cvtts_pd_epi32(__m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask(
(__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_mask_cvttspd_epi32(__m256i __W, __mmask8 __U, __m512d __A) {
_mm512_mask_cvtts_pd_epi32(__m256i __W, __mmask8 __U, __m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask(
(__v8df)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttspd_epi32(__mmask8 __U, __m512d __A) {
_mm512_maskz_cvtts_pd_epi32(__mmask8 __U, __m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask(
(__v8df)__A, (__v8si)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
@ -55,20 +56,21 @@ _mm512_maskz_cvttspd_epi32(__mmask8 __U, __m512d __A) {
(const int)(__R)))
// 512 bit : Double -> uInt
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvttspd_epu32(__m512d __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_cvtts_pd_epu32(__m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask(
(__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_mask_cvttspd_epu32(__m256i __W, __mmask8 __U, __m512d __A) {
_mm512_mask_cvtts_pd_epu32(__m256i __W, __mmask8 __U, __m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask(
(__v8df)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttspd_epu32(__mmask8 __U, __m512d __A) {
_mm512_maskz_cvtts_pd_epu32(__mmask8 __U, __m512d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask(
(__v8df)__A, (__v8si)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
@ -91,18 +93,19 @@ _mm512_maskz_cvttspd_epu32(__mmask8 __U, __m512d __A) {
// 512 bit : Double -> Long
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttspd_epi64(__m512d __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvtts_pd_epi64(__m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask(
(__v8df)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttspd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
_mm512_mask_cvtts_pd_epi64(__m512i __W, __mmask8 __U, __m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask(
(__v8df)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttspd_epi64(__mmask8 __U, __m512d __A) {
_mm512_maskz_cvtts_pd_epi64(__mmask8 __U, __m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask(
(__v8df)__A, (__v8di)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@ -125,20 +128,21 @@ _mm512_maskz_cvttspd_epi64(__mmask8 __U, __m512d __A) {
// 512 bit : Double -> ULong
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttspd_epu64(__m512d __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvtts_pd_epu64(__m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask(
(__v8df)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttspd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
_mm512_mask_cvtts_pd_epu64(__m512i __W, __mmask8 __U, __m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask(
(__v8df)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttspd_epu64(__mmask8 __U, __m512d __A) {
_mm512_maskz_cvtts_pd_epu64(__mmask8 __U, __m512d __A) {
return ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask(
(__v8df)__A, (__v8di)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@ -160,20 +164,20 @@ _mm512_maskz_cvttspd_epu64(__mmask8 __U, __m512d __A) {
(const int)(__R)))
// 512 bit: Float -> int
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epi32(__m512 __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtts_ps_epi32(__m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask(
(__v16sf)(__A), (__v16si)_mm512_undefined_epi32(), (__mmask16)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttsps_epi32(__m512i __W, __mmask16 __U, __m512 __A) {
_mm512_mask_cvtts_ps_epi32(__m512i __W, __mmask16 __U, __m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask(
(__v16sf)(__A), (__v16si)(__W), __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttsps_epi32(__mmask16 __U, __m512 __A) {
_mm512_maskz_cvtts_ps_epi32(__mmask16 __U, __m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask(
(__v16sf)(__A), (__v16si)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@ -195,20 +199,20 @@ _mm512_maskz_cvttsps_epi32(__mmask16 __U, __m512 __A) {
(__mmask16)(__U), (const int)(__R)))
// 512 bit: Float -> uint
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epu32(__m512 __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtts_ps_epu32(__m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask(
(__v16sf)(__A), (__v16si)_mm512_undefined_epi32(), (__mmask16)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttsps_epu32(__m512i __W, __mmask16 __U, __m512 __A) {
_mm512_mask_cvtts_ps_epu32(__m512i __W, __mmask16 __U, __m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask(
(__v16sf)(__A), (__v16si)(__W), __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttsps_epu32(__mmask16 __U, __m512 __A) {
_mm512_maskz_cvtts_ps_epu32(__mmask16 __U, __m512 __A) {
return ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask(
(__v16sf)(__A), (__v16si)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@ -230,20 +234,20 @@ _mm512_maskz_cvttsps_epu32(__mmask16 __U, __m512 __A) {
(__mmask16)(__U), (const int)(__R)))
// 512 bit : float -> long
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epi64(__m256 __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtts_ps_epi64(__m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask(
(__v8sf)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttsps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
_mm512_mask_cvtts_ps_epi64(__m512i __W, __mmask8 __U, __m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask(
(__v8sf)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttsps_epi64(__mmask8 __U, __m256 __A) {
_mm512_maskz_cvtts_ps_epi64(__mmask8 __U, __m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask(
(__v8sf)__A, (__v8di)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));
@ -265,20 +269,20 @@ _mm512_maskz_cvttsps_epi64(__mmask8 __U, __m256 __A) {
(const int)(__R)))
// 512 bit : float -> ulong
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epu64(__m256 __A) {
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvtts_ps_epu64(__m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask(
(__v8sf)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_cvttsps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
_mm512_mask_cvtts_ps_epu64(__m512i __W, __mmask8 __U, __m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask(
(__v8sf)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION));
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_cvttsps_epu64(__mmask8 __U, __m256 __A) {
_mm512_maskz_cvtts_ps_epu64(__mmask8 __U, __m256 __A) {
return ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask(
(__v8sf)__A, (__v8di)_mm512_setzero_si512(), __U,
_MM_FROUND_CUR_DIRECTION));

View File

@ -14,286 +14,286 @@
#ifndef __AVX10_2_512SATCVTINTRIN_H
#define __AVX10_2_512SATCVTINTRIN_H
#define _mm512_ipcvtbf16_epi8(A) \
#define _mm512_ipcvts_bf16_epi8(A) \
((__m512i)__builtin_ia32_vcvtbf162ibs512((__v32bf)(__m512bh)(A)))
#define _mm512_mask_ipcvtbf16_epi8(W, U, A) \
#define _mm512_mask_ipcvts_bf16_epi8(W, U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvtbf16_epi8(A), \
(__v32hi)_mm512_ipcvts_bf16_epi8(A), \
(__v32hi)(__m512i)(W)))
#define _mm512_maskz_ipcvtbf16_epi8(U, A) \
#define _mm512_maskz_ipcvts_bf16_epi8(U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvtbf16_epi8(A), \
(__v32hi)_mm512_ipcvts_bf16_epi8(A), \
(__v32hi)_mm512_setzero_si512()))
#define _mm512_ipcvtbf16_epu8(A) \
#define _mm512_ipcvts_bf16_epu8(A) \
((__m512i)__builtin_ia32_vcvtbf162iubs512((__v32bf)(__m512bh)(A)))
#define _mm512_mask_ipcvtbf16_epu8(W, U, A) \
#define _mm512_mask_ipcvts_bf16_epu8(W, U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvtbf16_epu8(A), \
(__v32hi)_mm512_ipcvts_bf16_epu8(A), \
(__v32hi)(__m512i)(W)))
#define _mm512_maskz_ipcvtbf16_epu8(U, A) \
#define _mm512_maskz_ipcvts_bf16_epu8(U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvtbf16_epu8(A), \
(__v32hi)_mm512_ipcvts_bf16_epu8(A), \
(__v32hi)_mm512_setzero_si512()))
#define _mm512_ipcvttbf16_epi8(A) \
#define _mm512_ipcvtts_bf16_epi8(A) \
((__m512i)__builtin_ia32_vcvttbf162ibs512((__v32bf)(__m512bh)(A)))
#define _mm512_mask_ipcvttbf16_epi8(W, U, A) \
#define _mm512_mask_ipcvtts_bf16_epi8(W, U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvttbf16_epi8(A), \
(__v32hi)_mm512_ipcvtts_bf16_epi8(A), \
(__v32hi)(__m512i)(W)))
#define _mm512_maskz_ipcvttbf16_epi8(U, A) \
#define _mm512_maskz_ipcvtts_bf16_epi8(U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvttbf16_epi8(A), \
(__v32hi)_mm512_ipcvtts_bf16_epi8(A), \
(__v32hi)_mm512_setzero_si512()))
#define _mm512_ipcvttbf16_epu8(A) \
#define _mm512_ipcvtts_bf16_epu8(A) \
((__m512i)__builtin_ia32_vcvttbf162iubs512((__v32bf)(__m512bh)(A)))
#define _mm512_mask_ipcvttbf16_epu8(W, U, A) \
#define _mm512_mask_ipcvtts_bf16_epu8(W, U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvttbf16_epu8(A), \
(__v32hi)_mm512_ipcvtts_bf16_epu8(A), \
(__v32hi)(__m512i)(W)))
#define _mm512_maskz_ipcvttbf16_epu8(U, A) \
#define _mm512_maskz_ipcvtts_bf16_epu8(U, A) \
((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_ipcvttbf16_epu8(A), \
(__v32hi)_mm512_ipcvtts_bf16_epu8(A), \
(__v32hi)_mm512_setzero_si512()))
#define _mm512_ipcvtph_epi8(A) \
#define _mm512_ipcvts_ph_epi8(A) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvtph_epi8(W, U, A) \
#define _mm512_mask_ipcvts_ph_epi8(W, U, A) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)(W), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvtph_epi8(U, A) \
#define _mm512_maskz_ipcvts_ph_epi8(U, A) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvt_roundph_epi8(A, R) \
#define _mm512_ipcvts_roundph_epi8(A, R) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)_mm512_setzero_si512(), \
(__mmask32)-1, (const int)R))
(__mmask32) - 1, (const int)R))
#define _mm512_mask_ipcvt_roundph_epi8(W, U, A, R) \
#define _mm512_mask_ipcvts_roundph_epi8(W, U, A, R) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), (const int)R))
#define _mm512_maskz_ipcvt_roundph_epi8(U, A, R) \
#define _mm512_maskz_ipcvts_roundph_epi8(U, A, R) \
((__m512i)__builtin_ia32_vcvtph2ibs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)_mm512_setzero_si512(), \
(__mmask32)(U), (const int)R))
#define _mm512_ipcvtph_epu8(A) \
#define _mm512_ipcvts_ph_epu8(A) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvtph_epu8(W, U, A) \
#define _mm512_mask_ipcvts_ph_epu8(W, U, A) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)(W), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvtph_epu8(U, A) \
#define _mm512_maskz_ipcvts_ph_epu8(U, A) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvt_roundph_epu8(A, R) \
#define _mm512_ipcvts_roundph_epu8(A, R) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
(const int)R))
#define _mm512_mask_ipcvt_roundph_epu8(W, U, A, R) \
#define _mm512_mask_ipcvts_roundph_epu8(W, U, A, R) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), (const int)R))
#define _mm512_maskz_ipcvt_roundph_epu8(U, A, R) \
#define _mm512_maskz_ipcvts_roundph_epu8(U, A, R) \
((__m512i)__builtin_ia32_vcvtph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
(const int)R))
#define _mm512_ipcvtps_epi8(A) \
#define _mm512_ipcvts_ps_epi8(A) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvtps_epi8(W, U, A) \
#define _mm512_mask_ipcvts_ps_epi8(W, U, A) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask((__v16sf)(__m512)(A), \
(__v16su)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvtps_epi8(U, A) \
#define _mm512_maskz_ipcvts_ps_epi8(U, A) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvt_roundps_epi8(A, R) \
#define _mm512_ipcvts_roundps_epi8(A, R) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask((__v16sf)(__m512)(A), \
(__v16su)_mm512_setzero_si512(), \
(__mmask16)-1, (const int)R))
(__mmask16) - 1, (const int)R))
#define _mm512_mask_ipcvt_roundps_epi8(W, U, A, R) \
#define _mm512_mask_ipcvts_roundps_epi8(W, U, A, R) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask( \
(__v16sf)(__m512)(A), (__v16su)(W), (__mmask16)(U), (const int)R))
#define _mm512_maskz_ipcvt_roundps_epi8(U, A, R) \
#define _mm512_maskz_ipcvts_roundps_epi8(U, A, R) \
((__m512i)__builtin_ia32_vcvtps2ibs512_mask((__v16sf)(__m512)(A), \
(__v16su)_mm512_setzero_si512(), \
(__mmask16)(U), (const int)R))
#define _mm512_ipcvtps_epu8(A) \
#define _mm512_ipcvts_ps_epu8(A) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvtps_epu8(W, U, A) \
#define _mm512_mask_ipcvts_ps_epu8(W, U, A) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask((__v16sf)(__m512)(A), \
(__v16su)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvtps_epu8(U, A) \
#define _mm512_maskz_ipcvts_ps_epu8(U, A) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvt_roundps_epu8(A, R) \
#define _mm512_ipcvts_roundps_epu8(A, R) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
(const int)R))
#define _mm512_mask_ipcvt_roundps_epu8(W, U, A, R) \
#define _mm512_mask_ipcvts_roundps_epu8(W, U, A, R) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)(W), (__mmask16)(U), (const int)R))
#define _mm512_maskz_ipcvt_roundps_epu8(U, A, R) \
#define _mm512_maskz_ipcvts_roundps_epu8(U, A, R) \
((__m512i)__builtin_ia32_vcvtps2iubs512_mask( \
(__v16sf)(__m512)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
(const int)R))
#define _mm512_ipcvttph_epi8(A) \
#define _mm512_ipcvtts_ph_epi8(A) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvttph_epi8(W, U, A) \
#define _mm512_mask_ipcvtts_ph_epi8(W, U, A) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)(W), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvttph_epi8(U, A) \
#define _mm512_maskz_ipcvtts_ph_epi8(U, A) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvtt_roundph_epi8(A, S) \
#define _mm512_ipcvtts_roundph_epi8(A, S) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
S))
#define _mm512_mask_ipcvtt_roundph_epi8(W, U, A, S) \
#define _mm512_mask_ipcvtts_roundph_epi8(W, U, A, S) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), S))
#define _mm512_maskz_ipcvtt_roundph_epi8(U, A, S) \
#define _mm512_maskz_ipcvtts_roundph_epi8(U, A, S) \
((__m512i)__builtin_ia32_vcvttph2ibs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
S))
#define _mm512_ipcvttph_epu8(A) \
#define _mm512_ipcvtts_ph_epu8(A) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvttph_epu8(W, U, A) \
#define _mm512_mask_ipcvtts_ph_epu8(W, U, A) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask((__v32hf)(__m512h)(A), \
(__v32hu)(W), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvttph_epu8(U, A) \
#define _mm512_maskz_ipcvtts_ph_epu8(U, A) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvtt_roundph_epu8(A, S) \
#define _mm512_ipcvtts_roundph_epu8(A, S) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)-1, \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32) - 1, \
S))
#define _mm512_mask_ipcvtt_roundph_epu8(W, U, A, S) \
#define _mm512_mask_ipcvtts_roundph_epu8(W, U, A, S) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)(W), (__mmask32)(U), S))
#define _mm512_maskz_ipcvtt_roundph_epu8(U, A, S) \
#define _mm512_maskz_ipcvtts_roundph_epu8(U, A, S) \
((__m512i)__builtin_ia32_vcvttph2iubs512_mask( \
(__v32hf)(__m512h)(A), (__v32hu)_mm512_setzero_si512(), (__mmask32)(U), \
S))
#define _mm512_ipcvttps_epi8(A) \
#define _mm512_ipcvtts_ps_epi8(A) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvttps_epi8(W, U, A) \
#define _mm512_mask_ipcvtts_ps_epi8(W, U, A) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask((__v16sf)(__m512h)(A), \
(__v16su)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvttps_epi8(U, A) \
#define _mm512_maskz_ipcvtts_ps_epi8(U, A) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvtt_roundps_epi8(A, S) \
#define _mm512_ipcvtts_roundps_epi8(A, S) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
S))
#define _mm512_mask_ipcvtt_roundps_epi8(W, U, A, S) \
#define _mm512_mask_ipcvtts_roundps_epi8(W, U, A, S) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)(W), (__mmask16)(U), S))
#define _mm512_maskz_ipcvtt_roundps_epi8(U, A, S) \
#define _mm512_maskz_ipcvtts_roundps_epi8(U, A, S) \
((__m512i)__builtin_ia32_vcvttps2ibs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
S))
#define _mm512_ipcvttps_epu8(A) \
#define _mm512_ipcvtts_ps_epu8(A) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_ipcvttps_epu8(W, U, A) \
#define _mm512_mask_ipcvtts_ps_epu8(W, U, A) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask((__v16sf)(__m512h)(A), \
(__v16su)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_ipcvttps_epu8(U, A) \
#define _mm512_maskz_ipcvtts_ps_epu8(U, A) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm512_ipcvtt_roundps_epu8(A, S) \
#define _mm512_ipcvtts_roundps_epu8(A, S) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)-1, \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16) - 1, \
S))
#define _mm512_mask_ipcvtt_roundps_epu8(W, U, A, S) \
#define _mm512_mask_ipcvtts_roundps_epu8(W, U, A, S) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)(W), (__mmask16)(U), S))
#define _mm512_maskz_ipcvtt_roundps_epu8(U, A, S) \
#define _mm512_maskz_ipcvtts_roundps_epu8(U, A, S) \
((__m512i)__builtin_ia32_vcvttps2iubs512_mask( \
(__v16sf)(__m512h)(A), (__v16su)_mm512_setzero_si512(), (__mmask16)(U), \
S))

View File

@ -852,8 +852,8 @@ _mm_maskz_sqrt_pbh(__mmask8 __U, __m128bh __A) {
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
return (__m256bh)__builtin_ia32_vfmaddnepbh256((__v16bf)__A, (__v16bf)__B,
(__v16bf)__C);
return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
(__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
@ -880,8 +880,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
return (__m256bh)__builtin_ia32_vfmaddnepbh256((__v16bf)__A, (__v16bf)__B,
-(__v16bf)__C);
return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
-(__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
@ -908,8 +908,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fnmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
return (__m256bh)__builtin_ia32_vfmaddnepbh256((__v16bf)__A, -(__v16bf)__B,
(__v16bf)__C);
return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
(__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_pbh(
@ -938,8 +938,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_pbh(
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_fnmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
return (__m256bh)__builtin_ia32_vfmaddnepbh256((__v16bf)__A, -(__v16bf)__B,
-(__v16bf)__C);
return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
-(__v16bf)__C);
}
static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_pbh(
@ -969,8 +969,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_pbh(
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmadd_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
return (__m128bh)__builtin_ia32_vfmaddnepbh128((__v8bf)__A, (__v8bf)__B,
(__v8bf)__C);
return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
(__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@ -997,8 +997,8 @@ _mm_maskz_fmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmsub_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
return (__m128bh)__builtin_ia32_vfmaddnepbh128((__v8bf)__A, (__v8bf)__B,
-(__v8bf)__C);
return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
-(__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@ -1025,8 +1025,8 @@ _mm_maskz_fmsub_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmadd_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
return (__m128bh)__builtin_ia32_vfmaddnepbh128((__v8bf)__A, -(__v8bf)__B,
(__v8bf)__C);
return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
(__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@ -1053,8 +1053,8 @@ _mm_maskz_fnmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh __B, __m128bh __C) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmsub_pbh(__m128bh __A,
__m128bh __B,
__m128bh __C) {
return (__m128bh)__builtin_ia32_vfmaddnepbh128((__v8bf)__A, -(__v8bf)__B,
-(__v8bf)__C);
return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
-(__v8bf)__C);
}
static __inline__ __m128bh __DEFAULT_FN_ATTRS128

File diff suppressed because it is too large Load Diff

View File

@ -66,34 +66,19 @@
(__v2df)_mm_setzero_pd(), (__mmask8)(U)))
#define _mm256_minmax_pd(A, B, C) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
((__m256d)__builtin_ia32_vminmaxpd256_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_setzero_pd(), (__mmask8)-1, _MM_FROUND_NO_EXC))
(__v4df)_mm256_setzero_pd(), (__mmask8)-1))
#define _mm256_mask_minmax_pd(W, U, A, B, C) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
((__m256d)__builtin_ia32_vminmaxpd256_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)(__m256d)(W), (__mmask8)(U), _MM_FROUND_NO_EXC))
(__v4df)(__m256d)(W), (__mmask8)(U)))
#define _mm256_maskz_minmax_pd(U, A, B, C) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
((__m256d)__builtin_ia32_vminmaxpd256_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_setzero_pd(), (__mmask8)(U), _MM_FROUND_NO_EXC))
#define _mm256_minmax_round_pd(A, B, C, R) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_undefined_pd(), (__mmask8)-1, (int)(R)))
#define _mm256_mask_minmax_round_pd(W, U, A, B, C, R) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)(__m256d)(W), (__mmask8)(U), (int)(R)))
#define _mm256_maskz_minmax_round_pd(U, A, B, C, R) \
((__m256d)__builtin_ia32_vminmaxpd256_round_mask( \
(__v4df)(__m256d)(A), (__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_setzero_pd(), (__mmask8)(U), (int)(R)))
(__v4df)_mm256_setzero_pd(), (__mmask8)(U)))
#define _mm_minmax_ph(A, B, C) \
((__m128h)__builtin_ia32_vminmaxph128_mask( \
@ -111,34 +96,19 @@
(__v8hf)_mm_setzero_ph(), (__mmask8)(U)))
#define _mm256_minmax_ph(A, B, C) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
((__m256h)__builtin_ia32_vminmaxph256_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)_mm256_setzero_ph(), (__mmask16)-1, _MM_FROUND_NO_EXC))
(__v16hf)_mm256_setzero_ph(), (__mmask16)-1))
#define _mm256_mask_minmax_ph(W, U, A, B, C) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
((__m256h)__builtin_ia32_vminmaxph256_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)(__m256h)(W), (__mmask16)(U), _MM_FROUND_NO_EXC))
(__v16hf)(__m256h)(W), (__mmask16)(U)))
#define _mm256_maskz_minmax_ph(U, A, B, C) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
((__m256h)__builtin_ia32_vminmaxph256_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)_mm256_setzero_ph(), (__mmask16)(U), _MM_FROUND_NO_EXC))
#define _mm256_minmax_round_ph(A, B, C, R) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)_mm256_undefined_ph(), (__mmask16)-1, (int)(R)))
#define _mm256_mask_minmax_round_ph(W, U, A, B, C, R) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (C), \
(__v16hf)(__m256h)(W), (__mmask16)(U), (int)(R)))
#define _mm256_maskz_minmax_round_ph(U, A, B, C, R) \
((__m256h)__builtin_ia32_vminmaxph256_round_mask( \
(__v16hf)(__m256h)(A), (__v16hf)(__m256h)(B), (int)(C), \
(__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R)))
(__v16hf)_mm256_setzero_ph(), (__mmask16)(U)))
#define _mm_minmax_ps(A, B, C) \
((__m128)__builtin_ia32_vminmaxps128_mask( \
@ -156,34 +126,19 @@
(__v4sf)_mm_setzero_ps(), (__mmask8)(U)))
#define _mm256_minmax_ps(A, B, C) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
((__m256)__builtin_ia32_vminmaxps256_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_setzero_ps(), (__mmask8)-1, _MM_FROUND_NO_EXC))
(__v8sf)_mm256_setzero_ps(), (__mmask8)-1))
#define _mm256_mask_minmax_ps(W, U, A, B, C) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
((__m256)__builtin_ia32_vminmaxps256_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), (__v8sf)(__m256)(W), \
(__mmask8)(U), _MM_FROUND_NO_EXC))
(__mmask8)(U)))
#define _mm256_maskz_minmax_ps(U, A, B, C) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
((__m256)__builtin_ia32_vminmaxps256_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_setzero_ps(), (__mmask8)(U), _MM_FROUND_NO_EXC))
#define _mm256_minmax_round_ps(A, B, C, R) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_undefined_ps(), (__mmask8)-1, (int)(R)))
#define _mm256_mask_minmax_round_ps(W, U, A, B, C, R) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), (__v8sf)(__m256)(W), \
(__mmask8)(U), (int)(R)))
#define _mm256_maskz_minmax_round_ps(U, A, B, C, R) \
((__m256)__builtin_ia32_vminmaxps256_round_mask( \
(__v8sf)(__m256)(A), (__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_setzero_ps(), (__mmask8)(U), (int)(R)))
(__v8sf)_mm256_setzero_ps(), (__mmask8)(U)))
#define _mm_minmax_sd(A, B, C) \
((__m128d)__builtin_ia32_vminmaxsd_round_mask( \

File diff suppressed because it is too large Load Diff

View File

@ -71,175 +71,134 @@
#endif /* __x86_64__ */
// 128 Bit : Double -> int
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epi32(__m128d __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtts_pd_epi32(__m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask(
(__v2df)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1)));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttspd_epi32(__m128i __W, __mmask8 __U, __m128d __A) {
_mm_mask_cvtts_pd_epi32(__m128i __W, __mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask((__v2df)__A, (__v4si)__W,
__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttspd_epi32(__mmask16 __U, __m128d __A) {
_mm_maskz_cvtts_pd_epi32(__mmask16 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask(
(__v2df)__A, (__v4si)(__m128i)_mm_setzero_si128(), __U));
}
// 256 Bit : Double -> int
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvttspd_epi32(__m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(
(__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_pd_epi32(__m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_mask(
(__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttspd_epi32(__m128i __W, __mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(
(__v4df)__A, (__v4si)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_pd_epi32(__m128i __W, __mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_mask((__v4df)__A, (__v4si)__W,
__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttspd_epi32(__mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask(
(__v4df)__A, (__v4si)_mm_setzero_si128(), __U, _MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_pd_epi32(__mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2dqs256_mask(
(__v4df)__A, (__v4si)_mm_setzero_si128(), __U));
}
#define _mm256_cvtts_roundpd_epi32(__A, __R) \
((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_undefined_si128(), \
(__mmask8) - 1, (int)(__R)))
#define _mm256_mask_cvtts_roundpd_epi32(__W, __U, __A, __R) \
((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)__W, (__mmask8)__U, (int)(__R)))
#define _mm256_maskz_cvtts_roundpd_epi32(__U, __A, __R) \
((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_setzero_si128(), \
(__mmask8)__U, (int)(__R)))
// 128 Bit : Double -> uint
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epu32(__m128d __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtts_pd_epu32(__m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask(
(__v2df)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1)));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttspd_epu32(__m128i __W, __mmask8 __U, __m128d __A) {
_mm_mask_cvtts_pd_epu32(__m128i __W, __mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask(
(__v2df)__A, (__v4si)(__m128i)__W, (__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttspd_epu32(__mmask8 __U, __m128d __A) {
_mm_maskz_cvtts_pd_epu32(__mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask(
(__v2df)__A, (__v4si)(__m128i)_mm_setzero_si128(), __U));
}
// 256 Bit : Double -> uint
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvttspd_epu32(__m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(
(__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_pd_epu32(__m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_mask(
(__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttspd_epu32(__m128i __W, __mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(
(__v4df)__A, (__v4si)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_pd_epu32(__m128i __W, __mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_mask((__v4df)__A, (__v4si)__W,
__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttspd_epu32(__mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask(
(__v4df)__A, (__v4si)_mm_setzero_si128(), __U, _MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_pd_epu32(__mmask8 __U, __m256d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2udqs256_mask(
(__v4df)__A, (__v4si)_mm_setzero_si128(), __U));
}
#define _mm256_cvtts_roundpd_epu32(__A, __R) \
((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_undefined_si128(), \
(__mmask8) - 1, (int)(__R)))
#define _mm256_mask_cvtts_roundpd_epu32(__W, __U, __A, __R) \
((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)__W, (__mmask8)__U, (int)(__R)))
#define _mm256_maskz_cvtts_roundpd_epu32(__U, __A, __R) \
((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( \
(__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_setzero_si128(), \
(__mmask8)__U, (int)(__R)))
// 128 Bit : Double -> long
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epi64(__m128d __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtts_pd_epi64(__m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2qqs128_mask(
(__v2df)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttspd_epi64(__m128i __W, __mmask8 __U, __m128d __A) {
_mm_mask_cvtts_pd_epi64(__m128i __W, __mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2qqs128_mask((__v2df)__A, (__v2di)__W,
(__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttspd_epi64(__mmask8 __U, __m128d __A) {
_mm_maskz_cvtts_pd_epi64(__mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2qqs128_mask(
(__v2df)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U));
}
// 256 Bit : Double -> long
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttspd_epi64(__m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask(
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_pd_epi64(__m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_mask(
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttspd_epi64(__m256i __W, __mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask(
(__v4df)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_pd_epi64(__m256i __W, __mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_mask((__v4df)__A, (__v4di)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttspd_epi64(__mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask(
(__v4df)__A, (__v4di)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_pd_epi64(__mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2qqs256_mask(
(__v4df)__A, (__v4di)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundpd_epi64(__A, __R) \
((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask( \
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \
(int)__R))
#define _mm256_mask_cvtts_roundpd_epi64(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask((__v4df)__A, (__v4di)__W, \
(__mmask8)__U, (int)__R))
#define _mm256_maskz_cvtts_roundpd_epi64(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask( \
(__v4df)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, (int)__R))
// 128 Bit : Double -> ulong
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epu64(__m128d __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtts_pd_epu64(__m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2uqqs128_mask(
(__v2df)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttspd_epu64(__m128i __W, __mmask8 __U, __m128d __A) {
_mm_mask_cvtts_pd_epu64(__m128i __W, __mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2uqqs128_mask((__v2df)__A, (__v2di)__W,
(__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttspd_epu64(__mmask8 __U, __m128d __A) {
_mm_maskz_cvtts_pd_epu64(__mmask8 __U, __m128d __A) {
return ((__m128i)__builtin_ia32_vcvttpd2uqqs128_mask(
(__v2df)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U));
}
@ -247,105 +206,74 @@ _mm_maskz_cvttspd_epu64(__mmask8 __U, __m128d __A) {
// 256 Bit : Double -> ulong
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttspd_epu64(__m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask(
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_pd_epu64(__m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_mask(
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttspd_epu64(__m256i __W, __mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask(
(__v4df)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_pd_epu64(__m256i __W, __mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_mask((__v4df)__A, (__v4di)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttspd_epu64(__mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask(
(__v4df)__A, (__v4di)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_pd_epu64(__mmask8 __U, __m256d __A) {
return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_mask(
(__v4df)__A, (__v4di)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundpd_epu64(__A, __R) \
((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( \
(__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \
(int)__R))
#define _mm256_mask_cvtts_roundpd_epu64(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( \
(__v4df)__A, (__v4di)__W, (__mmask8)__U, (int)__R))
#define _mm256_maskz_cvtts_roundpd_epu64(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( \
(__v4df)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, (int)__R))
// 128 Bit : float -> int
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epi32(__m128 __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtts_ps_epi32(__m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2dqs128_mask(
(__v4sf)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1)));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttsps_epi32(__m128i __W, __mmask8 __U, __m128 __A) {
_mm_mask_cvtts_ps_epi32(__m128i __W, __mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2dqs128_mask((__v4sf)__A, (__v4si)__W,
(__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttsps_epi32(__mmask8 __U, __m128 __A) {
_mm_maskz_cvtts_ps_epi32(__mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2dqs128_mask(
(__v4sf)__A, (__v4si)(__m128i)_mm_setzero_si128(), (__mmask8)__U));
}
// 256 Bit : float -> int
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttsps_epi32(__m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask(
(__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_ps_epi32(__m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_mask(
(__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttsps_epi32(__m256i __W, __mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask(
(__v8sf)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_ps_epi32(__m256i __W, __mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_mask((__v8sf)__A, (__v8si)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttsps_epi32(__mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask(
(__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_ps_epi32(__mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2dqs256_mask(
(__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundps_epi32(__A, __R) \
((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_undefined_si256(), \
(__mmask8) - 1, (int)(__R)))
#define _mm256_mask_cvtts_roundps_epi32(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)__W, (__mmask8)__U, (int)(__R)))
#define _mm256_maskz_cvtts_roundps_epi32(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_setzero_si256(), \
(__mmask8)__U, (int)(__R)))
// 128 Bit : float -> uint
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epu32(__m128 __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtts_ps_epu32(__m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2udqs128_mask(
(__v4sf)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1)));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttsps_epu32(__m128i __W, __mmask8 __U, __m128 __A) {
_mm_mask_cvtts_ps_epu32(__m128i __W, __mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2udqs128_mask((__v4sf)__A, (__v4si)__W,
(__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttsps_epu32(__mmask8 __U, __m128 __A) {
_mm_maskz_cvtts_ps_epu32(__mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2udqs128_mask(
(__v4sf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U));
}
@ -353,144 +281,96 @@ _mm_maskz_cvttsps_epu32(__mmask8 __U, __m128 __A) {
// 256 Bit : float -> uint
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttsps_epu32(__m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask(
(__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_ps_epu32(__m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_mask(
(__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttsps_epu32(__m256i __W, __mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask(
(__v8sf)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_ps_epu32(__m256i __W, __mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_mask((__v8sf)__A, (__v8si)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttsps_epu32(__mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask(
(__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_ps_epu32(__mmask8 __U, __m256 __A) {
return ((__m256i)__builtin_ia32_vcvttps2udqs256_mask(
(__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundps_epu32(__A, __R) \
((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_undefined_si256(), \
(__mmask8) - 1, (int)(__R)))
#define _mm256_mask_cvtts_roundps_epu32(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)__W, (__mmask8)__U, (int)(__R)))
#define _mm256_maskz_cvtts_roundps_epu32(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( \
(__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_setzero_si256(), \
(__mmask8)__U, (int)(__R)))
// 128 bit : float -> long
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epi64(__m128 __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtts_ps_epi64(__m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2qqs128_mask(
(__v4sf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttsps_epi64(__m128i __W, __mmask8 __U, __m128 __A) {
_mm_mask_cvtts_ps_epi64(__m128i __W, __mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2qqs128_mask(
(__v4sf)__A, (__v2di)(__m128i)__W, (__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttsps_epi64(__mmask8 __U, __m128 __A) {
_mm_maskz_cvtts_ps_epi64(__mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2qqs128_mask(
(__v4sf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U));
}
// 256 bit : float -> long
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttsps_epi64(__m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask(
(__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_ps_epi64(__m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_mask(
(__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttsps_epi64(__m256i __W, __mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask(
(__v4sf)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_ps_epi64(__m256i __W, __mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_mask((__v4sf)__A, (__v4di)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttsps_epi64(__mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask(
(__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_ps_epi64(__mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2qqs256_mask(
(__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundps_epi64(__A, __R) \
((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \
(int)__R))
#define _mm256_mask_cvtts_roundps_epi64(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)__W, (__mmask8)__U, (int)__R))
#define _mm256_maskz_cvtts_roundps_epi64(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, \
(int)__R))
// 128 bit : float -> ulong
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epu64(__m128 __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtts_ps_epu64(__m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2uqqs128_mask(
(__v4sf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttsps_epu64(__m128i __W, __mmask8 __U, __m128 __A) {
_mm_mask_cvtts_ps_epu64(__m128i __W, __mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2uqqs128_mask(
(__v4sf)__A, (__v2di)(__m128i)__W, (__mmask8)__U));
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttsps_epu64(__mmask8 __U, __m128 __A) {
_mm_maskz_cvtts_ps_epu64(__mmask8 __U, __m128 __A) {
return ((__m128i)__builtin_ia32_vcvttps2uqqs128_mask(
(__v4sf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U));
}
// 256 bit : float -> ulong
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttsps_epu64(__m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask(
(__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1,
_MM_FROUND_CUR_DIRECTION));
_mm256_cvtts_ps_epu64(__m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_mask(
(__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttsps_epu64(__m256i __W, __mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask(
(__v4sf)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION));
_mm256_mask_cvtts_ps_epu64(__m256i __W, __mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_mask((__v4sf)__A, (__v4di)__W,
__U));
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttsps_epu64(__mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask(
(__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U,
_MM_FROUND_CUR_DIRECTION));
_mm256_maskz_cvtts_ps_epu64(__mmask8 __U, __m128 __A) {
return ((__m256i)__builtin_ia32_vcvttps2uqqs256_mask(
(__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U));
}
#define _mm256_cvtts_roundps_epu64(__A, __R) \
((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \
(int)__R))
#define _mm256_mask_cvtts_roundps_epu64(__W, __U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)__W, (__mmask8)__U, (int)__R))
#define _mm256_maskz_cvtts_roundps_epu64(__U, __A, __R) \
((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask( \
(__v4sf)(__m128)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, \
(int)__R))
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
#endif // __AVX10_2SATCVTDSINTRIN_H

View File

@ -14,431 +14,299 @@
#ifndef __AVX10_2SATCVTINTRIN_H
#define __AVX10_2SATCVTINTRIN_H
#define _mm_ipcvtbf16_epi8(A) \
#define _mm_ipcvts_bf16_epi8(A) \
((__m128i)__builtin_ia32_vcvtbf162ibs128((__v8bf)(__m128bh)(A)))
#define _mm_mask_ipcvtbf16_epi8(W, U, A) \
#define _mm_mask_ipcvts_bf16_epi8(W, U, A) \
((__m128i)__builtin_ia32_selectw_128( \
(__mmask8)(U), (__v8hi)_mm_ipcvtbf16_epi8(A), (__v8hi)(__m128i)(W)))
(__mmask8)(U), (__v8hi)_mm_ipcvts_bf16_epi8(A), (__v8hi)(__m128i)(W)))
#define _mm_maskz_ipcvtbf16_epi8(U, A) \
#define _mm_maskz_ipcvts_bf16_epi8(U, A) \
((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_ipcvtbf16_epi8(A), \
(__v8hi)_mm_ipcvts_bf16_epi8(A), \
(__v8hi)_mm_setzero_si128()))
#define _mm256_ipcvtbf16_epi8(A) \
#define _mm256_ipcvts_bf16_epi8(A) \
((__m256i)__builtin_ia32_vcvtbf162ibs256((__v16bf)(__m256bh)(A)))
#define _mm256_mask_ipcvtbf16_epi8(W, U, A) \
#define _mm256_mask_ipcvts_bf16_epi8(W, U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvtbf16_epi8(A), \
(__v16hi)_mm256_ipcvts_bf16_epi8(A), \
(__v16hi)(__m256i)(W)))
#define _mm256_maskz_ipcvtbf16_epi8(U, A) \
#define _mm256_maskz_ipcvts_bf16_epi8(U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvtbf16_epi8(A), \
(__v16hi)_mm256_ipcvts_bf16_epi8(A), \
(__v16hi)_mm256_setzero_si256()))
#define _mm_ipcvtbf16_epu8(A) \
#define _mm_ipcvts_bf16_epu8(A) \
((__m128i)__builtin_ia32_vcvtbf162iubs128((__v8bf)(__m128bh)(A)))
#define _mm_mask_ipcvtbf16_epu8(W, U, A) \
#define _mm_mask_ipcvts_bf16_epu8(W, U, A) \
((__m128i)__builtin_ia32_selectw_128( \
(__mmask8)(U), (__v8hi)_mm_ipcvtbf16_epu8(A), (__v8hi)(__m128i)(W)))
(__mmask8)(U), (__v8hi)_mm_ipcvts_bf16_epu8(A), (__v8hi)(__m128i)(W)))
#define _mm_maskz_ipcvtbf16_epu8(U, A) \
#define _mm_maskz_ipcvts_bf16_epu8(U, A) \
((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_ipcvtbf16_epu8(A), \
(__v8hi)_mm_ipcvts_bf16_epu8(A), \
(__v8hi)_mm_setzero_si128()))
#define _mm256_ipcvtbf16_epu8(A) \
#define _mm256_ipcvts_bf16_epu8(A) \
((__m256i)__builtin_ia32_vcvtbf162iubs256((__v16bf)(__m256bh)(A)))
#define _mm256_mask_ipcvtbf16_epu8(W, U, A) \
#define _mm256_mask_ipcvts_bf16_epu8(W, U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvtbf16_epu8(A), \
(__v16hi)_mm256_ipcvts_bf16_epu8(A), \
(__v16hi)(__m256i)(W)))
#define _mm256_maskz_ipcvtbf16_epu8(U, A) \
#define _mm256_maskz_ipcvts_bf16_epu8(U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvtbf16_epu8(A), \
(__v16hi)_mm256_ipcvts_bf16_epu8(A), \
(__v16hi)_mm256_setzero_si256()))
#define _mm_ipcvtph_epi8(A) \
#define _mm_ipcvts_ph_epi8(A) \
((__m128i)__builtin_ia32_vcvtph2ibs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvtph_epi8(W, U, A) \
#define _mm_mask_ipcvts_ph_epi8(W, U, A) \
((__m128i)__builtin_ia32_vcvtph2ibs128_mask((__v8hf)(__m128h)(A), \
(__v8hu)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvtph_epi8(U, A) \
#define _mm_maskz_ipcvts_ph_epi8(U, A) \
((__m128i)__builtin_ia32_vcvtph2ibs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvtph_epi8(A) \
#define _mm256_ipcvts_ph_epi8(A) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1))
#define _mm256_mask_ipcvtph_epi8(W, U, A) \
#define _mm256_mask_ipcvts_ph_epi8(W, U, A) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v16hu)(W), (__mmask16)(U)))
#define _mm256_maskz_ipcvtph_epi8(U, A) \
#define _mm256_maskz_ipcvts_ph_epi8(U, A) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
(__mmask16)(U)))
#define _mm256_ipcvt_roundph_epi8(A, R) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)_mm256_setzero_si256(), \
(__mmask16)-1, (const int)R))
#define _mm256_mask_ipcvt_roundph_epi8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
#define _mm256_maskz_ipcvt_roundph_epi8(U, A, R) \
((__m256i)__builtin_ia32_vcvtph2ibs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)_mm256_setzero_si256(), \
(__mmask16)(U), (const int)R))
#define _mm_ipcvtph_epu8(A) \
#define _mm_ipcvts_ph_epu8(A) \
((__m128i)__builtin_ia32_vcvtph2iubs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvtph_epu8(W, U, A) \
#define _mm_mask_ipcvts_ph_epu8(W, U, A) \
((__m128i)__builtin_ia32_vcvtph2iubs128_mask((__v8hf)(__m128h)(A), \
(__v8hu)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvtph_epu8(U, A) \
#define _mm_maskz_ipcvts_ph_epu8(U, A) \
((__m128i)__builtin_ia32_vcvtph2iubs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvtph_epu8(A) \
#define _mm256_ipcvts_ph_epu8(A) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1))
#define _mm256_mask_ipcvtph_epu8(W, U, A) \
#define _mm256_mask_ipcvts_ph_epu8(W, U, A) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v16hu)(W), (__mmask16)(U)))
#define _mm256_maskz_ipcvtph_epu8(U, A) \
#define _mm256_maskz_ipcvts_ph_epu8(U, A) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
(__mmask16)(U)))
#define _mm256_ipcvt_roundph_epu8(A, R) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
(const int)R))
#define _mm256_mask_ipcvt_roundph_epu8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
#define _mm256_maskz_ipcvt_roundph_epu8(U, A, R) \
((__m256i)__builtin_ia32_vcvtph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)(U), \
(const int)R))
#define _mm_ipcvtps_epi8(A) \
#define _mm_ipcvts_ps_epi8(A) \
((__m128i)__builtin_ia32_vcvtps2ibs128_mask( \
(__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvtps_epi8(W, U, A) \
#define _mm_mask_ipcvts_ps_epi8(W, U, A) \
((__m128i)__builtin_ia32_vcvtps2ibs128_mask((__v4sf)(__m128)(A), \
(__v4su)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvtps_epi8(U, A) \
#define _mm_maskz_ipcvts_ps_epi8(U, A) \
((__m128i)__builtin_ia32_vcvtps2ibs128_mask( \
(__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvtps_epi8(A) \
#define _mm256_ipcvts_ps_epi8(A) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_ipcvtps_epi8(W, U, A) \
#define _mm256_mask_ipcvts_ps_epi8(W, U, A) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)(W), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8su)(W), (__mmask8)(U)))
#define _mm256_maskz_ipcvtps_epi8(U, A) \
#define _mm256_maskz_ipcvts_ps_epi8(U, A) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U)))
#define _mm256_ipcvt_roundps_epi8(A, R) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)-1, (const int)R))
#define _mm256_mask_ipcvt_roundps_epi8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
#define _mm256_maskz_ipcvt_roundps_epi8(U, A, R) \
((__m256i)__builtin_ia32_vcvtps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)(U), (const int)R))
#define _mm_ipcvtps_epu8(A) \
#define _mm_ipcvts_ps_epu8(A) \
((__m128i)__builtin_ia32_vcvtps2iubs128_mask( \
(__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvtps_epu8(W, U, A) \
#define _mm_mask_ipcvts_ps_epu8(W, U, A) \
((__m128i)__builtin_ia32_vcvtps2iubs128_mask((__v4sf)(__m128)(A), \
(__v4su)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvtps_epu8(U, A) \
#define _mm_maskz_ipcvts_ps_epu8(U, A) \
((__m128i)__builtin_ia32_vcvtps2iubs128_mask( \
(__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvtps_epu8(A) \
#define _mm256_ipcvts_ps_epu8(A) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_ipcvtps_epu8(W, U, A) \
#define _mm256_mask_ipcvts_ps_epu8(W, U, A) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask((__v8sf)(__m256)(A), \
(__v8su)(W), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8su)(W), (__mmask8)(U)))
#define _mm256_maskz_ipcvtps_epu8(U, A) \
#define _mm256_maskz_ipcvts_ps_epu8(U, A) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U)))
#define _mm256_ipcvt_roundps_epu8(A, R) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)-1, (const int)R))
#define _mm256_mask_ipcvt_roundps_epu8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
#define _mm256_maskz_ipcvt_roundps_epu8(U, A, R) \
((__m256i)__builtin_ia32_vcvtps2iubs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)(U), (const int)R))
#define _mm_ipcvttbf16_epi8(A) \
#define _mm_ipcvtts_bf16_epi8(A) \
((__m128i)__builtin_ia32_vcvttbf162ibs128((__v8bf)(__m128bh)(A)))
#define _mm_mask_ipcvttbf16_epi8(W, U, A) \
#define _mm_mask_ipcvtts_bf16_epi8(W, U, A) \
((__m128i)__builtin_ia32_selectw_128( \
(__mmask8)(U), (__v8hi)_mm_ipcvttbf16_epi8(A), (__v8hi)(__m128i)(W)))
(__mmask8)(U), (__v8hi)_mm_ipcvtts_bf16_epi8(A), (__v8hi)(__m128i)(W)))
#define _mm_maskz_ipcvttbf16_epi8(U, A) \
#define _mm_maskz_ipcvtts_bf16_epi8(U, A) \
((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_ipcvttbf16_epi8(A), \
(__v8hi)_mm_ipcvtts_bf16_epi8(A), \
(__v8hi)_mm_setzero_si128()))
#define _mm256_ipcvttbf16_epi8(A) \
#define _mm256_ipcvtts_bf16_epi8(A) \
((__m256i)__builtin_ia32_vcvttbf162ibs256((__v16bf)(__m256bh)(A)))
#define _mm256_mask_ipcvttbf16_epi8(W, U, A) \
#define _mm256_mask_ipcvtts_bf16_epi8(W, U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvttbf16_epi8(A), \
(__v16hi)_mm256_ipcvtts_bf16_epi8(A), \
(__v16hi)(__m256i)(W)))
#define _mm256_maskz_ipcvttbf16_epi8(U, A) \
#define _mm256_maskz_ipcvtts_bf16_epi8(U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvttbf16_epi8(A), \
(__v16hi)_mm256_ipcvtts_bf16_epi8(A), \
(__v16hi)_mm256_setzero_si256()))
#define _mm_ipcvttbf16_epu8(A) \
#define _mm_ipcvtts_bf16_epu8(A) \
((__m128i)__builtin_ia32_vcvttbf162iubs128((__v8bf)(__m128bh)(A)))
#define _mm_mask_ipcvttbf16_epu8(W, U, A) \
#define _mm_mask_ipcvtts_bf16_epu8(W, U, A) \
((__m128i)__builtin_ia32_selectw_128( \
(__mmask8)(U), (__v8hi)_mm_ipcvttbf16_epu8(A), (__v8hi)(__m128i)(W)))
(__mmask8)(U), (__v8hi)_mm_ipcvtts_bf16_epu8(A), (__v8hi)(__m128i)(W)))
#define _mm_maskz_ipcvttbf16_epu8(U, A) \
#define _mm_maskz_ipcvtts_bf16_epu8(U, A) \
((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_ipcvttbf16_epu8(A), \
(__v8hi)_mm_ipcvtts_bf16_epu8(A), \
(__v8hi)_mm_setzero_si128()))
#define _mm256_ipcvttbf16_epu8(A) \
#define _mm256_ipcvtts_bf16_epu8(A) \
((__m256i)__builtin_ia32_vcvttbf162iubs256((__v16bf)(__m256bh)(A)))
#define _mm256_mask_ipcvttbf16_epu8(W, U, A) \
#define _mm256_mask_ipcvtts_bf16_epu8(W, U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvttbf16_epu8(A), \
(__v16hi)_mm256_ipcvtts_bf16_epu8(A), \
(__v16hi)(__m256i)(W)))
#define _mm256_maskz_ipcvttbf16_epu8(U, A) \
#define _mm256_maskz_ipcvtts_bf16_epu8(U, A) \
((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_ipcvttbf16_epu8(A), \
(__v16hi)_mm256_ipcvtts_bf16_epu8(A), \
(__v16hi)_mm256_setzero_si256()))
#define _mm_ipcvttph_epi8(A) \
#define _mm_ipcvtts_ph_epi8(A) \
((__m128i)__builtin_ia32_vcvttph2ibs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvttph_epi8(W, U, A) \
#define _mm_mask_ipcvtts_ph_epi8(W, U, A) \
((__m128i)__builtin_ia32_vcvttph2ibs128_mask((__v8hf)(__m128h)(A), \
(__v8hu)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvttph_epi8(U, A) \
#define _mm_maskz_ipcvtts_ph_epi8(U, A) \
((__m128i)__builtin_ia32_vcvttph2ibs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvttph_epi8(A) \
#define _mm256_ipcvtts_ph_epi8(A) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1))
#define _mm256_mask_ipcvttph_epi8(W, U, A) \
#define _mm256_mask_ipcvtts_ph_epi8(W, U, A) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v16hu)(W), (__mmask16)(U)))
#define _mm256_maskz_ipcvttph_epi8(U, A) \
#define _mm256_maskz_ipcvtts_ph_epi8(U, A) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
(__mmask16)(U)))
#define _mm256_ipcvtt_roundph_epi8(A, R) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
(const int)R))
#define _mm256_mask_ipcvtt_roundph_epi8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
#define _mm256_maskz_ipcvtt_roundph_epi8(U, A, R) \
((__m256i)__builtin_ia32_vcvttph2ibs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)(U), \
(const int)R))
#define _mm_ipcvttph_epu8(A) \
#define _mm_ipcvtts_ph_epu8(A) \
((__m128i)__builtin_ia32_vcvttph2iubs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvttph_epu8(W, U, A) \
#define _mm_mask_ipcvtts_ph_epu8(W, U, A) \
((__m128i)__builtin_ia32_vcvttph2iubs128_mask((__v8hf)(__m128h)(A), \
(__v8hu)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvttph_epu8(U, A) \
#define _mm_maskz_ipcvtts_ph_epu8(U, A) \
((__m128i)__builtin_ia32_vcvttph2iubs128_mask( \
(__v8hf)(__m128h)(A), (__v8hu)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvttph_epu8(A) \
#define _mm256_ipcvtts_ph_epu8(A) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1))
#define _mm256_mask_ipcvttph_epu8(W, U, A) \
#define _mm256_mask_ipcvtts_ph_epu8(W, U, A) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask((__v16hf)(__m256h)(A), \
(__v16hu)(W), (__mmask16)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v16hu)(W), (__mmask16)(U)))
#define _mm256_maskz_ipcvttph_epu8(U, A) \
#define _mm256_maskz_ipcvtts_ph_epu8(U, A) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(_mm256_setzero_si256()), \
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
(__mmask16)(U)))
#define _mm256_ipcvtt_roundph_epu8(A, R) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)-1, \
(const int)R))
#define _mm256_mask_ipcvtt_roundph_epu8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)(W), (__mmask16)(U), (const int)R))
#define _mm256_maskz_ipcvtt_roundph_epu8(U, A, R) \
((__m256i)__builtin_ia32_vcvttph2iubs256_mask( \
(__v16hf)(__m256h)(A), (__v16hu)_mm256_setzero_si256(), (__mmask16)(U), \
(const int)R))
#define _mm_ipcvttps_epi8(A) \
#define _mm_ipcvtts_ps_epi8(A) \
((__m128i)__builtin_ia32_vcvttps2ibs128_mask( \
(__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvttps_epi8(W, U, A) \
#define _mm_mask_ipcvtts_ps_epi8(W, U, A) \
((__m128i)__builtin_ia32_vcvttps2ibs128_mask((__v4sf)(__m128)(A), \
(__v4su)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvttps_epi8(U, A) \
#define _mm_maskz_ipcvtts_ps_epi8(U, A) \
((__m128i)__builtin_ia32_vcvttps2ibs128_mask( \
(__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvttps_epi8(A) \
#define _mm256_ipcvtts_ps_epi8(A) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_ipcvttps_epi8(W, U, A) \
#define _mm256_mask_ipcvtts_ps_epi8(W, U, A) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)(W), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8su)(W), (__mmask8)(U)))
#define _mm256_maskz_ipcvttps_epi8(U, A) \
#define _mm256_maskz_ipcvtts_ps_epi8(U, A) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U)))
#define _mm256_ipcvtt_roundps_epi8(A, R) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)-1, (const int)R))
#define _mm256_mask_ipcvtt_roundps_epi8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
#define _mm256_maskz_ipcvtt_roundps_epi8(U, A, R) \
((__m256i)__builtin_ia32_vcvttps2ibs256_mask((__v8sf)(__m256)(A), \
(__v8su)_mm256_setzero_si256(), \
(__mmask8)(U), (const int)R))
#define _mm_ipcvttps_epu8(A) \
#define _mm_ipcvtts_ps_epu8(A) \
((__m128i)__builtin_ia32_vcvttps2iubs128_mask( \
(__v4sf)(__m128)(A), (__v4su)_mm_setzero_si128(), (__mmask8)-1))
#define _mm_mask_ipcvttps_epu8(W, U, A) \
#define _mm_mask_ipcvtts_ps_epu8(W, U, A) \
((__m128i)__builtin_ia32_vcvttps2iubs128_mask((__v4sf)(__m128)(A), \
(__v4su)(W), (__mmask8)(U)))
#define _mm_maskz_ipcvttps_epu8(U, A) \
#define _mm_maskz_ipcvtts_ps_epu8(U, A) \
((__m128i)__builtin_ia32_vcvttps2iubs128_mask( \
(__v4sf)(__m128)(A), (__v4su)(_mm_setzero_si128()), (__mmask8)(U)))
#define _mm256_ipcvttps_epu8(A) \
#define _mm256_ipcvtts_ps_epu8(A) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
_MM_FROUND_CUR_DIRECTION))
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1))
#define _mm256_mask_ipcvttps_epu8(W, U, A) \
#define _mm256_mask_ipcvtts_ps_epu8(W, U, A) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask((__v8sf)(__m256)(A), \
(__v8su)(W), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
(__v8su)(W), (__mmask8)(U)))
#define _mm256_maskz_ipcvttps_epu8(U, A) \
#define _mm256_maskz_ipcvtts_ps_epu8(U, A) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION))
#define _mm256_ipcvtt_roundps_epu8(A, R) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)-1, \
(const int)R))
#define _mm256_mask_ipcvtt_roundps_epu8(W, U, A, R) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)(W), (__mmask8)(U), (const int)R))
#define _mm256_maskz_ipcvtt_roundps_epu8(U, A, R) \
((__m256i)__builtin_ia32_vcvttps2iubs256_mask( \
(__v8sf)(__m256)(A), (__v8su)_mm256_setzero_si256(), (__mmask8)(U), \
(const int)R))
(__v8sf)(__m256)(A), (__v8su)(_mm256_setzero_si256()), (__mmask8)(U)))
#endif // __AVX10_2SATCVTINTRIN_H

View File

@ -553,7 +553,8 @@ static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_abs_ph(__m512h __A) {
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_conj_pch(__m512h __A) {
return (__m512h)_mm512_xor_ps((__m512)__A, _mm512_set1_ps(-0.0f));
return (__m512h)_mm512_xor_epi32((__m512i)__A,
_mm512_set1_epi32(-2147483648));
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512

View File

@ -161,8 +161,6 @@ _mm_tzcnt_64(unsigned long long __X) {
#undef __RELAXED_FN_ATTRS
#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI__)
/* Define the default attributes for the functions in this file. */
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS \
@ -603,6 +601,4 @@ __blsr_u64(unsigned long long __X) {
#undef __DEFAULT_FN_ATTRS
#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__BMI__) */
#endif /* __BMIINTRIN_H */

46
lib/include/cpuid.h vendored
View File

@ -267,18 +267,18 @@
: "0"(__leaf), "2"(__count))
#else
/* x86-64 uses %rbx as the base register, so preserve it. */
#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
__asm(" xchgq %%rbx,%q1\n" \
" cpuid\n" \
" xchgq %%rbx,%q1" \
: "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
__asm(" xchg{q|} {%%|}rbx,%q1\n" \
" cpuid\n" \
" xchg{q|} {%%|}rbx,%q1" \
: "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__leaf))
#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
__asm(" xchgq %%rbx,%q1\n" \
" cpuid\n" \
" xchgq %%rbx,%q1" \
: "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
__asm(" xchg{q|} {%%|}rbx,%q1\n" \
" cpuid\n" \
" xchg{q|} {%%|}rbx,%q1" \
: "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__leaf), "2"(__count))
#endif
@ -289,20 +289,22 @@ static __inline unsigned int __get_cpuid_max (unsigned int __leaf,
#ifdef __i386__
int __cpuid_supported;
__asm(" pushfl\n"
" popl %%eax\n"
" movl %%eax,%%ecx\n"
" xorl $0x00200000,%%eax\n"
" pushl %%eax\n"
" popfl\n"
" pushfl\n"
" popl %%eax\n"
" movl $0,%0\n"
" cmpl %%eax,%%ecx\n"
__asm(" pushf{l|d}\n"
" pop{l|} {%%|}eax\n"
" mov{l|} {%%eax,%%ecx|ecx,eax}\n"
" xor{l|} {$0x00200000,%%eax|eax,0x00200000}\n"
" push{l|} {%%|}eax\n"
" popf{l|d}\n"
" pushf{l|d}\n"
" pop{l|} {%%|}eax\n"
" mov{l|} {$0,%0|%0,0}\n"
" cmp{l|} {%%eax,%%ecx|ecx,eax}\n"
" je 1f\n"
" movl $1,%0\n"
" mov{l|} {$1,%0|%0,1}\n"
"1:"
: "=r" (__cpuid_supported) : : "eax", "ecx");
: "=r"(__cpuid_supported)
:
: "eax", "ecx");
if (!__cpuid_supported)
return 0;
#endif

13
lib/include/float.h vendored
View File

@ -18,21 +18,12 @@
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
*
* Also fall back on Darwin and AIX to allow additional definitions and
* Also fall back on AIX to allow additional definitions and
* implementation-defined values.
*/
#if (defined(__APPLE__) || defined(__MINGW32__) || defined(_MSC_VER) || \
defined(_AIX)) && \
#if (defined(__MINGW32__) || defined(_MSC_VER) || defined(_AIX)) && \
__STDC_HOSTED__ && __has_include_next(<float.h>)
/* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level
* of #include_next<float.h> to keep Metrowerks compilers happy. Avoid this
* extra indirection.
*/
#ifdef __APPLE__
#define _FLOAT_H_
#endif
# include_next <float.h>
/* Undefine anything that we'll be redefining below. */

View File

@ -16,231 +16,112 @@
#include <x86gprintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__MMX__)
#include <mmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE__)
#include <xmmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE2__)
#include <emmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE3__)
#include <pmmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSSE3__)
#include <tmmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__SSE4_2__) || defined(__SSE4_1__))
#include <smmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AES__) || defined(__PCLMUL__))
#include <wmmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CLFLUSHOPT__)
#include <clflushoptintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CLWB__)
#include <clwbintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX__)
#include <avxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX2__)
#include <avx2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__F16C__)
#include <f16cintrin.h>
#endif
/* No feature check desired due to internal checks */
#include <bmiintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI2__)
#include <bmi2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__LZCNT__)
#include <lzcntintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__POPCNT__)
#include <popcntintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA__)
#include <fmaintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512F__)
#include <avx512fintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VL__)
#include <avx512vlintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BW__)
#include <avx512bwintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BITALG__)
#include <avx512bitalgintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512CD__)
#include <avx512cdintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__)
#include <avx512vpopcntdqintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
#include <avx512vpopcntdqvlintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VNNI__)
#include <avx512vnniintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VNNI__))
#include <avx512vlvnniintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNI__)
#include <avxvnniintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512DQ__)
#include <avx512dqintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BITALG__))
#include <avx512vlbitalgintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BW__))
#include <avx512vlbwintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512CD__))
#include <avx512vlcdintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512DQ__))
#include <avx512vldqintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512IFMA__)
#include <avx512ifmaintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512IFMA__) && defined(__AVX512VL__))
#include <avx512ifmavlintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXIFMA__)
#include <avxifmaintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI__)
#include <avx512vbmiintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VBMI__) && defined(__AVX512VL__))
#include <avx512vbmivlintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI2__)
#include <avx512vbmi2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VBMI2__) && defined(__AVX512VL__))
#include <avx512vlvbmi2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512FP16__)
#include <avx512fp16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512FP16__))
#include <avx512vlfp16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BF16__)
#include <avx512bf16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BF16__))
#include <avx512vlbf16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__PKU__)
#include <pkuintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__VPCLMULQDQ__)
#include <vpclmulqdqintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__VAES__)
#include <vaesintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__GFNI__)
#include <gfniintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT8__)
#include <avxvnniint8intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXNECONVERT__)
#include <avxneconvertintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA512__)
#include <sha512intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SM3__)
#include <sm3intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SM4__)
#include <sm4intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT16__)
#include <avxvnniint16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPID__)
/// Reads the value of the IA32_TSC_AUX MSR (0xc0000103).
///
/// \headerfile <immintrin.h>
@ -252,9 +133,7 @@ static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __
_rdpid_u32(void) {
return __builtin_ia32_rdpid();
}
#endif // __RDPID__
#if !defined(__SCE__) || __has_feature(modules) || defined(__RDRND__)
/// Returns a 16-bit hardware-generated random value.
///
/// \headerfile <immintrin.h>
@ -314,9 +193,7 @@ _rdrand64_step(unsigned long long *__p)
}
#endif
}
#endif /* __RDRND__ */
#if !defined(__SCE__) || __has_feature(modules) || defined(__FSGSBASE__)
#ifdef __x86_64__
/// Reads the FS base register.
///
@ -427,9 +304,6 @@ _writegsbase_u64(unsigned long long __V)
}
#endif
#endif /* __FSGSBASE__ */
#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVBE__)
/* The structs used below are to force the load/store to be unaligned. This
* is accomplished with the __packed__ attribute. The __may_alias__ prevents
@ -543,172 +417,86 @@ _storebe_i64(void * __P, long long __D) {
((struct __storeu_i64*)__P)->__v = __builtin_bswap64((unsigned long long)__D);
}
#endif
#endif /* __MOVBE */
#if !defined(__SCE__) || __has_feature(modules) || defined(__RTM__)
#include <rtmintrin.h>
#include <xtestintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA__)
#include <shaintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__FXSR__)
#include <fxsrintrin.h>
#endif
/* No feature check desired due to internal MSC_VER checks */
#include <xsaveintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEOPT__)
#include <xsaveoptintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEC__)
#include <xsavecintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVES__)
#include <xsavesintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SHSTK__)
#include <cetintrin.h>
#endif
/* Intrinsics inside adcintrin.h are available at all times. */
#include <adcintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__ADX__)
#include <adxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__RDSEED__)
#include <rdseedintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__WBNOINVD__)
#include <wbnoinvdintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CLDEMOTE__)
#include <cldemoteintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__WAITPKG__)
#include <waitpkgintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVDIRI__) || \
defined(__MOVDIR64B__)
#include <movdirintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVRS__)
#include <movrsintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX10_2__) && defined(__MOVRS__))
#include <movrs_avx10_2intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX10_2_512__) && defined(__MOVRS__))
#include <movrs_avx10_2_512intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__PCONFIG__)
#include <pconfigintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SGX__)
#include <sgxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__PTWRITE__)
#include <ptwriteintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__INVPCID__)
#include <invpcidintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__) || \
defined(__WIDEKL__)
#include <keylockerintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TILE__) || \
defined(__AMX_INT8__) || defined(__AMX_BF16__)
#include <amxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_FP16__)
#include <amxfp16intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_COMPLEX__)
#include <amxcomplexintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_FP8__)
#include <amxfp8intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TRANSPOSE__)
#include <amxtransposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_MOVRS__)
#include <amxmovrsintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_MOVRS__) && defined(__AMX_TRANSPOSE__))
#include <amxmovrstransposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_AVX512__)
#include <amxavx512intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TF32__)
#include <amxtf32intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_TF32__) && defined(__AMX_TRANSPOSE__))
#include <amxtf32transposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_BF16__) && defined(__AMX_TRANSPOSE__))
#include <amxbf16transposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_FP16__) && defined(__AMX_TRANSPOSE__))
#include <amxfp16transposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AMX_COMPLEX__) && defined(__AMX_TRANSPOSE__))
#include <amxcomplextransposeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
defined(__AVX512VP2INTERSECT__)
#include <avx512vp2intersectintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
#include <avx512vlvp2intersectintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX10_2__)
#include <avx10_2bf16intrin.h>
#include <avx10_2convertintrin.h>
#include <avx10_2copyintrin.h>
@ -716,33 +504,21 @@ _storebe_i64(void * __P, long long __D) {
#include <avx10_2niintrin.h>
#include <avx10_2satcvtdsintrin.h>
#include <avx10_2satcvtintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX10_2_512__)
#include <avx10_2_512bf16intrin.h>
#include <avx10_2_512convertintrin.h>
#include <avx10_2_512minmaxintrin.h>
#include <avx10_2_512niintrin.h>
#include <avx10_2_512satcvtdsintrin.h>
#include <avx10_2_512satcvtintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX10_2_512__) && defined(__SM4__))
#include <sm4evexintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__ENQCMD__)
#include <enqcmdintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SERIALIZE__)
#include <serializeintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__TSXLDTRK__)
#include <tsxldtrkintrin.h>
#endif
#if defined(_MSC_VER) && __has_extension(gnu_asm)
/* Define the default attributes for these intrinsics */

25
lib/include/intrin.h vendored
View File

@ -162,8 +162,6 @@ void _Store_HLERelease(long volatile *, long);
void _Store64_HLERelease(__int64 volatile *, __int64);
void _StorePointer_HLERelease(void *volatile *, void *);
void _WriteBarrier(void);
unsigned __int32 xbegin(void);
void _xend(void);
/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
#if defined(__x86_64__) && !defined(__arm64ec__)
@ -372,10 +370,29 @@ static __inline__ void __DEFAULT_FN_ATTRS __nop(void) {
\*----------------------------------------------------------------------------*/
#if defined(__aarch64__) || defined(__arm64ec__)
unsigned __int64 __getReg(int);
long _InterlockedAdd(long volatile *Addend, long Value);
__int64 _InterlockedAdd64(__int64 volatile *Addend, __int64 Value);
unsigned char _interlockedbittestandreset_acq(long volatile *, long);
unsigned char _interlockedbittestandreset_nf(long volatile *, long);
unsigned char _interlockedbittestandreset_rel(long volatile *, long);
unsigned char _interlockedbittestandreset64_acq(__int64 volatile *, __int64);
unsigned char _interlockedbittestandreset64_nf(__int64 volatile *, __int64);
unsigned char _interlockedbittestandreset64_rel(__int64 volatile *, __int64);
unsigned char _interlockedbittestandset_acq(long volatile *, long);
unsigned char _interlockedbittestandset_nf(long volatile *, long);
unsigned char _interlockedbittestandset_rel(long volatile *, long);
unsigned char _interlockedbittestandset64_acq(__int64 volatile *, __int64);
unsigned char _interlockedbittestandset64_nf(__int64 volatile *, __int64);
unsigned char _interlockedbittestandset64_rel(__int64 volatile *, __int64);
long _InterlockedAdd(long volatile *, long);
long _InterlockedAdd_acq(long volatile *, long);
long _InterlockedAdd_nf(long volatile *, long);
long _InterlockedAdd_rel(long volatile *, long);
__int64 _InterlockedAdd64(__int64 volatile *, __int64);
__int64 _InterlockedAdd64_acq(__int64 volatile *, __int64);
__int64 _InterlockedAdd64_nf(__int64 volatile *, __int64);
__int64 _InterlockedAdd64_rel(__int64 volatile *, __int64);
__int64 _ReadStatusReg(int);
void _WriteStatusReg(int, __int64);
unsigned int __sys(int, __int64);
unsigned short __cdecl _byteswap_ushort(unsigned short val);
unsigned long __cdecl _byteswap_ulong (unsigned long val);

View File

@ -28,8 +28,6 @@
#ifndef _KEYLOCKERINTRIN_H
#define _KEYLOCKERINTRIN_H
#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("kl"),\
@ -326,10 +324,6 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
#undef __DEFAULT_FN_ATTRS
#endif /* !defined(__SCE__ || __has_feature(modules) || defined(__KL__) */
#if !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("kl,widekl"),\
@ -521,7 +515,4 @@ _mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
#undef __DEFAULT_FN_ATTRS
#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) \
*/
#endif /* _KEYLOCKERINTRIN_H */

View File

@ -25,7 +25,7 @@
// The LLVM C library uses these named types so we forward declare them.
typedef void (*__atexithandler_t)(void);
typedef int (*__bsearchcompare_t)(const void *, const void *);
typedef int (*__search_compare_t)(const void *, const void *);
typedef int (*__qsortcompare_t)(const void *, const void *);
typedef int (*__qsortrcompare_t)(const void *, const void *, void *);

View File

@ -14,13 +14,15 @@
#ifndef __LZCNTINTRIN_H
#define __LZCNTINTRIN_H
/* Define the default attributes for the functions in this file. */
/* Define the default attributes for the functions in this file.
Allow using the lzcnt intrinsics even for non-LZCNT targets. Since the LZCNT
intrinsics are mapped to llvm.ctlz.*, false, which can be lowered to BSR on
non-LZCNT targets with zero-value input handled correctly. */
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("lzcnt"))) constexpr
__attribute__((__always_inline__, __nodebug__)) constexpr
#else
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
#endif
#ifndef _MSC_VER

View File

@ -35,6 +35,14 @@ module _Builtin_intrinsics [system] [extern_c] {
}
}
explicit module arm64 {
requires arm64
requires windows
header "arm64intr.h"
export *
}
explicit module intel {
requires x86
export *
@ -231,6 +239,11 @@ module _Builtin_stdbool [system] {
export *
}
module _Builtin_stdcountof [system] {
header "stdcountof.h"
export *
}
module _Builtin_stddef [system] {
textual header "stddef.h"

View File

@ -56,4 +56,4 @@ _m_prefetchrs(volatile const void *__P) {
}
#undef __DEFAULT_FN_ATTRS
#endif // __MOVRSINTRIN_H
#endif // __MOVRSINTRIN_H

View File

@ -14,6 +14,10 @@
#ifndef __PRFCHWINTRIN_H
#define __PRFCHWINTRIN_H
#if defined(__cplusplus)
extern "C" {
#endif
/// Loads a memory sequence containing the specified memory address into
/// all data cache levels.
///
@ -26,11 +30,7 @@
///
/// \param __P
/// A pointer specifying the memory address to be prefetched.
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_m_prefetch(void *__P)
{
__builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
}
void _m_prefetch(void *__P);
/// Loads a memory sequence containing the specified memory address into
/// the L1 data cache and sets the cache-coherency state to modified.
@ -48,13 +48,10 @@ _m_prefetch(void *__P)
///
/// \param __P
/// A pointer specifying the memory address to be prefetched.
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_m_prefetchw(volatile const void *__P)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
__builtin_prefetch ((const void*)__P, 1, 3 /* _MM_HINT_T0 */);
#pragma clang diagnostic pop
}
void _m_prefetchw(volatile const void *__P);
#if defined(__cplusplus)
} // extern "C"
#endif
#endif /* __PRFCHWINTRIN_H */

57
lib/include/ptrauth.h vendored
View File

@ -42,6 +42,19 @@ typedef enum {
The extra data is always 0. */
ptrauth_key_cxx_vtable_pointer = ptrauth_key_process_independent_data,
/* The key used to sign metadata pointers to Objective-C method-lists. */
ptrauth_key_method_list_pointer = ptrauth_key_asda,
/* The key used to sign Objective-C isa and super pointers. */
ptrauth_key_objc_isa_pointer = ptrauth_key_process_independent_data,
ptrauth_key_objc_super_pointer = ptrauth_key_process_independent_data,
/* The key used to sign selector pointers */
ptrauth_key_objc_sel_pointer = ptrauth_key_process_dependent_data,
/* The key used to sign Objective-C class_ro_t pointers. */
ptrauth_key_objc_class_ro_pointer = ptrauth_key_process_independent_data,
/* The key used to sign pointers in ELF .init_array/.fini_array. */
ptrauth_key_init_fini_pointer = ptrauth_key_process_independent_code,
@ -259,6 +272,46 @@ typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t;
/* The value is ptrauth_string_discriminator("init_fini") */
#define __ptrauth_init_fini_discriminator 0xd9d4
/* Objective-C pointer auth ABI qualifiers */
#define __ptrauth_objc_method_list_imp \
__ptrauth(ptrauth_key_function_pointer, 1, 0)
#if __has_feature(ptrauth_objc_method_list_pointer)
#define __ptrauth_objc_method_list_pointer \
__ptrauth(ptrauth_key_method_list_pointer, 1, 0xC310)
#else
#define __ptrauth_objc_method_list_pointer
#endif
#define __ptrauth_isa_discriminator 0x6AE1
#define __ptrauth_super_discriminator 0xB5AB
#define __ptrauth_objc_isa_pointer \
__ptrauth(ptrauth_key_objc_isa_pointer, 1, __ptrauth_isa_discriminator)
#if __has_feature(ptrauth_restricted_intptr_qualifier)
#define __ptrauth_objc_isa_uintptr \
__ptrauth_restricted_intptr(ptrauth_key_objc_isa_pointer, 1, \
__ptrauth_isa_discriminator)
#else
#define __ptrauth_objc_isa_uintptr \
__ptrauth(ptrauth_key_objc_isa_pointer, 1, __ptrauth_isa_discriminator)
#endif
#define __ptrauth_objc_super_pointer \
__ptrauth(ptrauth_key_objc_super_pointer, 1, __ptrauth_super_discriminator)
#define __ptrauth_objc_sel_discriminator 0x57c2
#if __has_feature(ptrauth_objc_interface_sel)
#define __ptrauth_objc_sel \
__ptrauth(ptrauth_key_objc_sel_pointer, 1, __ptrauth_objc_sel_discriminator)
#else
#define __ptrauth_objc_sel
#endif
#define __ptrauth_objc_class_ro_discriminator 0x61f8
#define __ptrauth_objc_class_ro \
__ptrauth(ptrauth_key_objc_class_ro_pointer, 1, \
__ptrauth_objc_class_ro_discriminator)
#else
#define ptrauth_strip(__value, __key) \
@ -331,6 +384,10 @@ typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t;
#define ptrauth_cxx_vtable_pointer(key, address_discrimination, \
extra_discrimination...)
#define __ptrauth_objc_isa_pointer
#define __ptrauth_objc_isa_uintptr
#define __ptrauth_objc_super_pointer
#endif /* __has_feature(ptrauth_intrinsics) */
#endif /* __PTRAUTH_H */

View File

@ -24,13 +24,13 @@ static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_abs(long a) {
return __builtin_abs(a);
}
static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_slet(long a, long b) {
return __builtin_riscv_cv_alu_slet(a, b);
static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_sle(long a, long b) {
return __builtin_riscv_cv_alu_sle(a, b);
}
static __inline__ long __DEFAULT_FN_ATTRS
__riscv_cv_alu_sletu(unsigned long a, unsigned long b) {
return __builtin_riscv_cv_alu_sletu(a, b);
__riscv_cv_alu_sleu(unsigned long a, unsigned long b) {
return __builtin_riscv_cv_alu_sleu(a, b);
}
static __inline__ long __DEFAULT_FN_ATTRS __riscv_cv_alu_min(long a, long b) {

View File

@ -49,7 +49,6 @@ enum __RISCV_FRM {
#define __riscv_vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
#define __riscv_vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
#if __riscv_v_elen >= 64
#define __riscv_vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
#define __riscv_vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
#define __riscv_vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
@ -58,7 +57,6 @@ enum __RISCV_FRM {
#define __riscv_vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
#define __riscv_vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
#define __riscv_vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
#endif
#define __riscv_vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
#define __riscv_vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
@ -78,7 +76,6 @@ enum __RISCV_FRM {
#define __riscv_vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
#define __riscv_vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
#if __riscv_v_elen >= 64
#define __riscv_vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
#define __riscv_vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
#define __riscv_vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
@ -87,7 +84,6 @@ enum __RISCV_FRM {
#define __riscv_vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
#define __riscv_vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
#define __riscv_vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
#endif
enum __RISCV_VXRM {

View File

@ -47,8 +47,9 @@
/// An immediate value where bits [1:0] select among four possible
/// combining functions and rounding constants (not specified here).
/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state.
#define _mm_sha1rnds4_epu32(V1, V2, M) \
__builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M))
#define _mm_sha1rnds4_epu32(V1, V2, M) \
((__m128i)__builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), \
(__v4si)(__m128i)(V2), (M)))
/// Calculates the SHA-1 state variable E from the SHA-1 state variables in
/// the 128-bit vector of [4 x i32] in \a __X, adds that to the next set of

15
lib/include/stdcountof.h vendored Normal file
View File

@ -0,0 +1,15 @@
/*===---- stdcountof.h - Standard header for countof -----------------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __STDCOUNTOF_H
#define __STDCOUNTOF_H
#define countof _Countof
#endif /* __STDCOUNTOF_H */

147
lib/include/stdint.h vendored
View File

@ -317,166 +317,55 @@ typedef __UINTMAX_TYPE__ uintmax_t;
* integer width that the target implements, so corresponding macros are
* defined below, too.
*
* These macros are defined using the same successive-shrinking approach as
* the type definitions above. It is likewise important that macros are defined
* in order of decending width.
*
* Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
* claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
*/
#define __int_c_join(a, b) a ## b
#define __int_c(v, suffix) __int_c_join(v, suffix)
#define __uint_c(v, suffix) __int_c_join(v##U, suffix)
#ifdef __INT64_TYPE__
# undef __int64_c_suffix
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT64_C_SUFFIX__
# define __int64_c_suffix __INT64_C_SUFFIX__
# define __int32_c_suffix __INT64_C_SUFFIX__
# define __int16_c_suffix __INT64_C_SUFFIX__
# define __int8_c_suffix __INT64_C_SUFFIX__
# endif /* __INT64_C_SUFFIX__ */
#endif /* __INT64_TYPE__ */
#ifdef __int_least64_t
# ifdef __int64_c_suffix
# define INT64_C(v) __int_c(v, __int64_c_suffix)
# define UINT64_C(v) __uint_c(v, __int64_c_suffix)
# else
# define INT64_C(v) v
# define UINT64_C(v) v ## U
# endif /* __int64_c_suffix */
#define INT64_C(v) __INT64_C(v)
#define UINT64_C(v) __UINT64_C(v)
#endif /* __int_least64_t */
#ifdef __INT56_TYPE__
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT56_C_SUFFIX__
# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
# define __int32_c_suffix __INT56_C_SUFFIX__
# define __int16_c_suffix __INT56_C_SUFFIX__
# define __int8_c_suffix __INT56_C_SUFFIX__
# else
# define INT56_C(v) v
# define UINT56_C(v) v ## U
# endif /* __INT56_C_SUFFIX__ */
#define INT56_C(v) __INT56_C(v)
#define UINT56_C(v) __UINT56_C(v)
#endif /* __INT56_TYPE__ */
#ifdef __INT48_TYPE__
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT48_C_SUFFIX__
# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
# define __int32_c_suffix __INT48_C_SUFFIX__
# define __int16_c_suffix __INT48_C_SUFFIX__
# define __int8_c_suffix __INT48_C_SUFFIX__
# else
# define INT48_C(v) v
# define UINT48_C(v) v ## U
# endif /* __INT48_C_SUFFIX__ */
#define INT48_C(v) __INT48_C(v)
#define UINT48_C(v) __UINT48_C(v)
#endif /* __INT48_TYPE__ */
#ifdef __INT40_TYPE__
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT40_C_SUFFIX__
# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
# define __int32_c_suffix __INT40_C_SUFFIX__
# define __int16_c_suffix __INT40_C_SUFFIX__
# define __int8_c_suffix __INT40_C_SUFFIX__
# else
# define INT40_C(v) v
# define UINT40_C(v) v ## U
# endif /* __INT40_C_SUFFIX__ */
#define INT40_C(v) __INT40_C(v)
#define UINT40_C(v) __UINT40_C(v)
#endif /* __INT40_TYPE__ */
#ifdef __INT32_TYPE__
# undef __int32_c_suffix
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT32_C_SUFFIX__
# define __int32_c_suffix __INT32_C_SUFFIX__
# define __int16_c_suffix __INT32_C_SUFFIX__
# define __int8_c_suffix __INT32_C_SUFFIX__
# endif /* __INT32_C_SUFFIX__ */
#endif /* __INT32_TYPE__ */
#ifdef __int_least32_t
# ifdef __int32_c_suffix
# define INT32_C(v) __int_c(v, __int32_c_suffix)
# define UINT32_C(v) __uint_c(v, __int32_c_suffix)
# else
# define INT32_C(v) v
# define UINT32_C(v) v ## U
# endif /* __int32_c_suffix */
#define INT32_C(v) __INT32_C(v)
#define UINT32_C(v) __UINT32_C(v)
#endif /* __int_least32_t */
#ifdef __INT24_TYPE__
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT24_C_SUFFIX__
# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
# define __int16_c_suffix __INT24_C_SUFFIX__
# define __int8_c_suffix __INT24_C_SUFFIX__
# else
# define INT24_C(v) v
# define UINT24_C(v) v ## U
# endif /* __INT24_C_SUFFIX__ */
#define INT24_C(v) __INT24_C(v)
#define UINT24_C(v) __UINT24_C(v)
#endif /* __INT24_TYPE__ */
#ifdef __INT16_TYPE__
# undef __int16_c_suffix
# undef __int8_c_suffix
# ifdef __INT16_C_SUFFIX__
# define __int16_c_suffix __INT16_C_SUFFIX__
# define __int8_c_suffix __INT16_C_SUFFIX__
# endif /* __INT16_C_SUFFIX__ */
#endif /* __INT16_TYPE__ */
#ifdef __int_least16_t
# ifdef __int16_c_suffix
# define INT16_C(v) __int_c(v, __int16_c_suffix)
# define UINT16_C(v) __uint_c(v, __int16_c_suffix)
# else
# define INT16_C(v) v
# define UINT16_C(v) v ## U
# endif /* __int16_c_suffix */
#define INT16_C(v) __INT16_C(v)
#define UINT16_C(v) __UINT16_C(v)
#endif /* __int_least16_t */
#ifdef __INT8_TYPE__
# undef __int8_c_suffix
# ifdef __INT8_C_SUFFIX__
# define __int8_c_suffix __INT8_C_SUFFIX__
# endif /* __INT8_C_SUFFIX__ */
#endif /* __INT8_TYPE__ */
#ifdef __int_least8_t
# ifdef __int8_c_suffix
# define INT8_C(v) __int_c(v, __int8_c_suffix)
# define UINT8_C(v) __uint_c(v, __int8_c_suffix)
# else
# define INT8_C(v) v
# define UINT8_C(v) v ## U
# endif /* __int8_c_suffix */
#define INT8_C(v) __INT8_C(v)
#define UINT8_C(v) __UINT8_C(v)
#endif /* __int_least8_t */
@ -938,8 +827,8 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#endif
/* 7.18.4.2 Macros for greatest-width integer constants. */
#define INTMAX_C(v) __int_c(v, __INTMAX_C_SUFFIX__)
#define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
#define INTMAX_C(v) __INTMAX_C(v)
#define UINTMAX_C(v) __UINTMAX_C(v)
/* C23 7.22.3.x Width of other integer types. */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L

View File

@ -7,6 +7,9 @@
*===-----------------------------------------------------------------------===
*/
#ifndef _VECINTRIN_H
#define _VECINTRIN_H
#if defined(__s390x__) && defined(__VEC__)
#define __ATTRS_ai __attribute__((__always_inline__))
@ -12861,3 +12864,5 @@ vec_search_string_until_zero_cc(__vector unsigned int __a,
#error "Use -fzvector to enable vector extensions"
#endif
#endif /* _VECINTRIN_H */

View File

@ -10,33 +10,19 @@
#ifndef __X86GPRINTRIN_H
#define __X86GPRINTRIN_H
#if !defined(__SCE__) || __has_feature(modules) || defined(__HRESET__)
#include <hresetintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__UINTR__)
#include <uintrintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__USERMSR__)
#include <usermsrintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CRC32__)
#include <crc32intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHI__)
#include <prfchiintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__RAOINT__)
#include <raointintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CMPCCXADD__)
#include <cmpccxaddintrin.h>
#endif
#if defined(__i386__)
#define __SAVE_GPRBX "mov {%%ebx, %%eax |eax, ebx};"

View File

@ -14,40 +14,22 @@
#include <immintrin.h>
#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHW__)
#include <prfchwintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE4A__)
#include <ammintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA4__)
#include <fma4intrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__XOP__)
#include <xopintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__TBM__)
#include <tbmintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__LWP__)
#include <lwpintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__MWAITX__)
#include <mwaitxintrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__CLZERO__)
#include <clzerointrin.h>
#endif
#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPRU__)
#include <rdpruintrin.h>
#endif
#endif /* __X86INTRIN_H */

View File

@ -2198,8 +2198,9 @@ _mm_storer_ps(float *__p, __m128 __a)
#define _MM_HINT_NTA 0
#ifndef _MSC_VER
/* FIXME: We have to #define this because "sel" must be a constant integer, and
Sema doesn't do any form of constant propagation yet. */
// If _MSC_VER is defined, we use the builtin variant of _mm_prefetch.
// Otherwise, we provide this macro, which includes a cast, allowing the user
// to pass a pointer of any time. The _mm_prefetch accepts char to match MSVC.
/// Loads one cache line of data from the specified address to a location
/// closer to the processor.

View File

@ -124,7 +124,7 @@ opterr:
.type optind, %object;
.size optind, 4
optind:
.data.rel.ro
.section .data.rel.ro,"aw"
.globl stderr
.type stderr, %object;
.size stderr, PTR_SIZE_BYTES

View File

@ -13,8 +13,10 @@
#include <__algorithm/for_each_segment.h>
#include <__algorithm/min.h>
#include <__config>
#include <__fwd/bit_reference.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/segmented_iterator.h>
#include <__memory/pointer_traits.h>
#include <__type_traits/common_type.h>
#include <__type_traits/enable_if.h>
#include <__utility/move.h>
@ -29,9 +31,129 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
template <class _InputIterator, class _OutputIterator>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator
copy(_InputIterator __first, _InputIterator __last, _OutputIterator __result);
template <class _InIter, class _Sent, class _OutIter>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> __copy(_InIter, _Sent, _OutIter);
// Copies the bit range [__first, __last) into __result for the case where
// source and destination share the same bit offset within a word
// (__first.__ctz_ == __result.__ctz_). Only the partial first and last words
// need masked read-modify-write; all whole middle words are copied verbatim.
// Returns an iterator one past the last bit written.
template <class _Cp, bool _IsConst>
_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI __bit_iterator<_Cp, false> __copy_aligned(
    __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) {
  using _In             = __bit_iterator<_Cp, _IsConst>;
  using difference_type = typename _In::difference_type;
  using __storage_type  = typename _In::__storage_type;

  const int __bits_per_word = _In::__bits_per_word;
  difference_type __n       = __last - __first;
  if (__n > 0) {
    // do first word: copy at most the bits from __first.__ctz_ up to the end
    // of the word (fewer if the whole range fits inside this word).
    if (__first.__ctz_ != 0) {
      unsigned __clz       = __bits_per_word - __first.__ctz_;
      difference_type __dn = std::min(static_cast<difference_type>(__clz), __n);
      __n -= __dn;
      __storage_type __m = std::__middle_mask<__storage_type>(__clz - __dn, __first.__ctz_);
      __storage_type __b = *__first.__seg_ & __m;
      // Read-modify-write: bits of the destination word outside the mask are preserved.
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b;
      __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word;
      __result.__ctz_ = static_cast<unsigned>((__dn + __result.__ctz_) % __bits_per_word);
      ++__first.__seg_;
      // __first.__ctz_ = 0;
    }
    // __first.__ctz_ == 0;
    // do middle words: both sides are now word-aligned, so whole words are
    // copied with a plain element-wise std::copy.
    __storage_type __nw = __n / __bits_per_word;
    std::copy(std::__to_address(__first.__seg_),
              std::__to_address(__first.__seg_ + __nw),
              std::__to_address(__result.__seg_));
    __n -= __nw * __bits_per_word;
    __result.__seg_ += __nw;
    // do last word: the remaining __n < __bits_per_word low bits.
    if (__n > 0) {
      __first.__seg_ += __nw;
      __storage_type __m = std::__trailing_mask<__storage_type>(__bits_per_word - __n);
      __storage_type __b = *__first.__seg_ & __m;
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b;
      __result.__ctz_ = static_cast<unsigned>(__n);
    }
  }
  return __result;
}
// Copies the bit range [__first, __last) into __result for the general case
// where source and destination have different bit offsets, so every source
// word must be shifted and split across up to two destination words.
// Returns an iterator one past the last bit written.
template <class _Cp, bool _IsConst>
_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI __bit_iterator<_Cp, false> __copy_unaligned(
    __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) {
  using _In             = __bit_iterator<_Cp, _IsConst>;
  using difference_type = typename _In::difference_type;
  using __storage_type  = typename _In::__storage_type;

  const int __bits_per_word = _In::__bits_per_word;
  difference_type __n       = __last - __first;
  if (__n > 0) {
    // do first word: extract the __dn bits of the partial source word, then
    // scatter them over one or two destination words.
    if (__first.__ctz_ != 0) {
      unsigned __clz_f     = __bits_per_word - __first.__ctz_;
      difference_type __dn = std::min(static_cast<difference_type>(__clz_f), __n);
      __n -= __dn;
      __storage_type __m = std::__middle_mask<__storage_type>(__clz_f - __dn, __first.__ctz_);
      __storage_type __b = *__first.__seg_ & __m;
      unsigned __clz_r   = __bits_per_word - __result.__ctz_;
      // __ddn bits fit into the current destination word; the rest spill over.
      __storage_type __ddn = std::min<__storage_type>(__dn, __clz_r);
      __m = std::__middle_mask<__storage_type>(__clz_r - __ddn, __result.__ctz_);
      *__result.__seg_ &= ~__m;
      // Shift direction depends on which side starts deeper into its word.
      if (__result.__ctz_ > __first.__ctz_)
        *__result.__seg_ |= __b << (__result.__ctz_ - __first.__ctz_);
      else
        *__result.__seg_ |= __b >> (__first.__ctz_ - __result.__ctz_);
      __result.__seg_ += (__ddn + __result.__ctz_) / __bits_per_word;
      __result.__ctz_ = static_cast<unsigned>((__ddn + __result.__ctz_) % __bits_per_word);
      __dn -= __ddn;
      // Leftover __dn bits go into the low end of the next destination word.
      if (__dn > 0) {
        __m = std::__trailing_mask<__storage_type>(__bits_per_word - __dn);
        *__result.__seg_ &= ~__m;
        *__result.__seg_ |= __b >> (__first.__ctz_ + __ddn);
        __result.__ctz_ = static_cast<unsigned>(__dn);
      }
      ++__first.__seg_;
      // __first.__ctz_ = 0;
    }
    // __first.__ctz_ == 0;
    // do middle words: each full source word is split into a high part for the
    // current destination word and a low part for the next one.
    unsigned __clz_r   = __bits_per_word - __result.__ctz_;
    __storage_type __m = std::__leading_mask<__storage_type>(__result.__ctz_);
    for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_) {
      __storage_type __b = *__first.__seg_;
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b << __result.__ctz_;
      ++__result.__seg_;
      *__result.__seg_ &= __m;
      *__result.__seg_ |= __b >> __clz_r;
    }
    // do last word: same split for the final partial source word.
    if (__n > 0) {
      __m                = std::__trailing_mask<__storage_type>(__bits_per_word - __n);
      __storage_type __b = *__first.__seg_ & __m;
      __storage_type __dn = std::min(__n, static_cast<difference_type>(__clz_r));
      __m = std::__middle_mask<__storage_type>(__clz_r - __dn, __result.__ctz_);
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b << __result.__ctz_;
      __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word;
      __result.__ctz_ = static_cast<unsigned>((__dn + __result.__ctz_) % __bits_per_word);
      __n -= __dn;
      if (__n > 0) {
        __m = std::__trailing_mask<__storage_type>(__bits_per_word - __n);
        *__result.__seg_ &= ~__m;
        *__result.__seg_ |= __b >> __dn;
        __result.__ctz_ = static_cast<unsigned>(__n);
      }
    }
  }
  return __result;
}
struct __copy_impl {
template <class _InIter, class _Sent, class _OutIter>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter>
@ -95,6 +217,16 @@ struct __copy_impl {
}
}
template <class _Cp, bool _IsConst>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<__bit_iterator<_Cp, _IsConst>, __bit_iterator<_Cp, false> >
operator()(__bit_iterator<_Cp, _IsConst> __first,
__bit_iterator<_Cp, _IsConst> __last,
__bit_iterator<_Cp, false> __result) const {
if (__first.__ctz_ == __result.__ctz_)
return std::make_pair(__last, std::__copy_aligned(__first, __last, __result));
return std::make_pair(__last, std::__copy_unaligned(__first, __last, __result));
}
// At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer.
template <class _In, class _Out, __enable_if_t<__can_lower_copy_assignment_to_memmove<_In, _Out>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*>
@ -110,7 +242,7 @@ __copy(_InIter __first, _Sent __last, _OutIter __result) {
}
template <class _InputIterator, class _OutputIterator>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator
copy(_InputIterator __first, _InputIterator __last, _OutputIterator __result) {
return std::__copy(__first, __last, __result).second;
}

View File

@ -10,11 +10,14 @@
#define _LIBCPP___ALGORITHM_COPY_BACKWARD_H
#include <__algorithm/copy_move_common.h>
#include <__algorithm/copy_n.h>
#include <__algorithm/iterator_operations.h>
#include <__algorithm/min.h>
#include <__config>
#include <__fwd/bit_reference.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/segmented_iterator.h>
#include <__memory/pointer_traits.h>
#include <__type_traits/common_type.h>
#include <__type_traits/enable_if.h>
#include <__type_traits/is_constructible.h>
@ -34,6 +37,124 @@ template <class _AlgPolicy, class _InIter, class _Sent, class _OutIter>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InIter, _OutIter>
__copy_backward(_InIter __first, _Sent __last, _OutIter __result);
// Backward-copies the bit range [__first, __last) so that the bit *(__last - 1)
// lands at *(__result - 1), for the case where __last and __result share the
// same bit offset. Word-aligned interior words are copied verbatim with
// std::copy_n; only the two partial boundary words need masking.
// Returns an iterator to the first (lowest-addressed) bit written.
template <class _Cp, bool _IsConst>
_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI __bit_iterator<_Cp, false> __copy_backward_aligned(
    __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) {
  using _In             = __bit_iterator<_Cp, _IsConst>;
  using difference_type = typename _In::difference_type;
  using __storage_type  = typename _In::__storage_type;

  const int __bits_per_word = _In::__bits_per_word;
  difference_type __n       = __last - __first;
  if (__n > 0) {
    // do first word: the partial word just below __last.
    if (__last.__ctz_ != 0) {
      difference_type __dn = std::min(static_cast<difference_type>(__last.__ctz_), __n);
      __n -= __dn;
      unsigned __clz     = __bits_per_word - __last.__ctz_;
      __storage_type __m = std::__middle_mask<__storage_type>(__clz, __last.__ctz_ - __dn);
      __storage_type __b = *__last.__seg_ & __m;
      // Read-modify-write preserves destination bits outside the mask.
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b;
      // (-__dn & (w-1)) is __dn's complement modulo the word size.
      __result.__ctz_ = static_cast<unsigned>(((-__dn & (__bits_per_word - 1)) + __result.__ctz_) % __bits_per_word);
      // __last.__ctz_ = 0
    }
    // __last.__ctz_ == 0 || __n == 0
    // __result.__ctz_ == 0 || __n == 0
    // do middle words: whole aligned words, copied in one bulk operation.
    __storage_type __nw = __n / __bits_per_word;
    __result.__seg_ -= __nw;
    __last.__seg_ -= __nw;
    std::copy_n(std::__to_address(__last.__seg_), __nw, std::__to_address(__result.__seg_));
    __n -= __nw * __bits_per_word;
    // do last word: the remaining high bits of the word below the copied span.
    if (__n > 0) {
      __storage_type __m = std::__leading_mask<__storage_type>(__bits_per_word - __n);
      __storage_type __b = *--__last.__seg_ & __m;
      *--__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b;
      __result.__ctz_ = static_cast<unsigned>(-__n & (__bits_per_word - 1));
    }
  }
  return __result;
}
// Backward-copies the bit range [__first, __last) ending at __result for the
// general case where __last and __result have different bit offsets; each
// source word is shifted and split across up to two destination words.
// Returns an iterator to the first (lowest-addressed) bit written.
template <class _Cp, bool _IsConst>
_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI __bit_iterator<_Cp, false> __copy_backward_unaligned(
    __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) {
  using _In             = __bit_iterator<_Cp, _IsConst>;
  using difference_type = typename _In::difference_type;
  using __storage_type  = typename _In::__storage_type;

  const int __bits_per_word = _In::__bits_per_word;
  difference_type __n       = __last - __first;
  if (__n > 0) {
    // do first word: the partial word just below __last, scattered over one or
    // two destination words.
    if (__last.__ctz_ != 0) {
      difference_type __dn = std::min(static_cast<difference_type>(__last.__ctz_), __n);
      __n -= __dn;
      unsigned __clz_l   = __bits_per_word - __last.__ctz_;
      __storage_type __m = std::__middle_mask<__storage_type>(__clz_l, __last.__ctz_ - __dn);
      __storage_type __b = *__last.__seg_ & __m;
      unsigned __clz_r   = __bits_per_word - __result.__ctz_;
      // __ddn bits fit below __result.__ctz_ in the current destination word.
      __storage_type __ddn = std::min(__dn, static_cast<difference_type>(__result.__ctz_));
      if (__ddn > 0) {
        __m = std::__middle_mask<__storage_type>(__clz_r, __result.__ctz_ - __ddn);
        *__result.__seg_ &= ~__m;
        // Shift direction depends on which side ends deeper into its word.
        if (__result.__ctz_ > __last.__ctz_)
          *__result.__seg_ |= __b << (__result.__ctz_ - __last.__ctz_);
        else
          *__result.__seg_ |= __b >> (__last.__ctz_ - __result.__ctz_);
        __result.__ctz_ = static_cast<unsigned>(((-__ddn & (__bits_per_word - 1)) + __result.__ctz_) % __bits_per_word);
        __dn -= __ddn;
      }
      // Remaining __dn bits spill into the high end of the previous word.
      if (__dn > 0) {
        // __result.__ctz_ == 0
        --__result.__seg_;
        __result.__ctz_ = static_cast<unsigned>(-__dn & (__bits_per_word - 1));
        __m = std::__leading_mask<__storage_type>(__result.__ctz_);
        *__result.__seg_ &= ~__m;
        __last.__ctz_ -= __dn + __ddn;
        *__result.__seg_ |= __b << (__result.__ctz_ - __last.__ctz_);
      }
      // __last.__ctz_ = 0
    }
    // __last.__ctz_ == 0 || __n == 0
    // __result.__ctz_ != 0 || __n == 0
    // do middle words: split each full source word between two destination words.
    unsigned __clz_r   = __bits_per_word - __result.__ctz_;
    __storage_type __m = std::__trailing_mask<__storage_type>(__clz_r);
    for (; __n >= __bits_per_word; __n -= __bits_per_word) {
      __storage_type __b = *--__last.__seg_;
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b >> __clz_r;
      *--__result.__seg_ &= __m;
      *__result.__seg_ |= __b << __result.__ctz_;
    }
    // do last word: the final partial source word, again split if necessary.
    if (__n > 0) {
      __m                = std::__leading_mask<__storage_type>(__bits_per_word - __n);
      __storage_type __b = *--__last.__seg_ & __m;
      __clz_r            = __bits_per_word - __result.__ctz_;
      __storage_type __dn = std::min(__n, static_cast<difference_type>(__result.__ctz_));
      __m = std::__middle_mask<__storage_type>(__clz_r, __result.__ctz_ - __dn);
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b >> (__bits_per_word - __result.__ctz_);
      __result.__ctz_ = static_cast<unsigned>(((-__dn & (__bits_per_word - 1)) + __result.__ctz_) % __bits_per_word);
      __n -= __dn;
      if (__n > 0) {
        // __result.__ctz_ == 0
        --__result.__seg_;
        __result.__ctz_ = static_cast<unsigned>(-__n & (__bits_per_word - 1));
        __m = std::__leading_mask<__storage_type>(__result.__ctz_);
        *__result.__seg_ &= ~__m;
        *__result.__seg_ |= __b << (__result.__ctz_ - (__bits_per_word - __n - __dn));
      }
    }
  }
  return __result;
}
template <class _AlgPolicy>
struct __copy_backward_impl {
template <class _InIter, class _Sent, class _OutIter>
@ -107,6 +228,16 @@ struct __copy_backward_impl {
}
}
template <class _Cp, bool _IsConst>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<__bit_iterator<_Cp, _IsConst>, __bit_iterator<_Cp, false> >
operator()(__bit_iterator<_Cp, _IsConst> __first,
__bit_iterator<_Cp, _IsConst> __last,
__bit_iterator<_Cp, false> __result) {
if (__last.__ctz_ == __result.__ctz_)
return std::make_pair(__last, std::__copy_backward_aligned(__first, __last, __result));
return std::make_pair(__last, std::__copy_backward_unaligned(__first, __last, __result));
}
// At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer.
template <class _In, class _Out, __enable_if_t<__can_lower_copy_assignment_to_memmove<_In, _Out>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*>

View File

@ -55,18 +55,18 @@ __count_bool(__bit_iterator<_Cp, _IsConst> __first, typename __size_difference_t
if (__first.__ctz_ != 0) {
__storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_);
__storage_type __dn = std::min(__clz_f, __n);
__storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn));
__r = std::__libcpp_popcount(std::__invert_if<!_ToCount>(*__first.__seg_) & __m);
__storage_type __m = std::__middle_mask<__storage_type>(__clz_f - __dn, __first.__ctz_);
__r = std::__popcount(__storage_type(std::__invert_if<!_ToCount>(*__first.__seg_) & __m));
__n -= __dn;
++__first.__seg_;
}
// do middle whole words
for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word)
__r += std::__libcpp_popcount(std::__invert_if<!_ToCount>(*__first.__seg_));
__r += std::__popcount(std::__invert_if<!_ToCount>(*__first.__seg_));
// do last partial word
if (__n > 0) {
__storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n);
__r += std::__libcpp_popcount(std::__invert_if<!_ToCount>(*__first.__seg_) & __m);
__storage_type __m = std::__trailing_mask<__storage_type>(__bits_per_word - __n);
__r += std::__popcount(__storage_type(std::__invert_if<!_ToCount>(*__first.__seg_) & __m));
}
return __r;
}

View File

@ -11,16 +11,20 @@
#define _LIBCPP___ALGORITHM_EQUAL_H
#include <__algorithm/comp.h>
#include <__algorithm/min.h>
#include <__algorithm/unwrap_iter.h>
#include <__config>
#include <__functional/identity.h>
#include <__fwd/bit_reference.h>
#include <__iterator/distance.h>
#include <__iterator/iterator_traits.h>
#include <__memory/pointer_traits.h>
#include <__string/constexpr_c_functions.h>
#include <__type_traits/desugars_to.h>
#include <__type_traits/enable_if.h>
#include <__type_traits/invoke.h>
#include <__type_traits/is_equality_comparable.h>
#include <__type_traits/is_same.h>
#include <__type_traits/is_volatile.h>
#include <__utility/move.h>
@ -33,6 +37,140 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
// Compares the bit range [__first1, __last1) against the range starting at
// __first2 when the two ranges have different bit offsets, so every word of
// the first range must be shifted before comparison against up to two words
// of the second range. Returns true iff all bits compare equal.
template <class _Cp, bool _IsConst1, bool _IsConst2>
[[__nodiscard__]] _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI bool
__equal_unaligned(__bit_iterator<_Cp, _IsConst1> __first1,
                  __bit_iterator<_Cp, _IsConst1> __last1,
                  __bit_iterator<_Cp, _IsConst2> __first2) {
  using _It             = __bit_iterator<_Cp, _IsConst1>;
  using difference_type = typename _It::difference_type;
  using __storage_type  = typename _It::__storage_type;

  const int __bits_per_word = _It::__bits_per_word;
  difference_type __n       = __last1 - __first1;
  if (__n > 0) {
    // do first word: extract the __dn bits of the partial first word and
    // compare them against one or two words of the second range.
    if (__first1.__ctz_ != 0) {
      unsigned __clz_f     = __bits_per_word - __first1.__ctz_;
      difference_type __dn = std::min(static_cast<difference_type>(__clz_f), __n);
      __n -= __dn;
      __storage_type __m = std::__middle_mask<__storage_type>(__clz_f - __dn, __first1.__ctz_);
      __storage_type __b = *__first1.__seg_ & __m;
      unsigned __clz_r   = __bits_per_word - __first2.__ctz_;
      // __ddn bits fall into the current word of the second range.
      __storage_type __ddn = std::min<__storage_type>(__dn, __clz_r);
      __m = std::__middle_mask<__storage_type>(__clz_r - __ddn, __first2.__ctz_);
      if (__first2.__ctz_ > __first1.__ctz_) {
        if (static_cast<__storage_type>(*__first2.__seg_ & __m) !=
            static_cast<__storage_type>(__b << (__first2.__ctz_ - __first1.__ctz_)))
          return false;
      } else {
        if (static_cast<__storage_type>(*__first2.__seg_ & __m) !=
            static_cast<__storage_type>(__b >> (__first1.__ctz_ - __first2.__ctz_)))
          return false;
      }
      __first2.__seg_ += (__ddn + __first2.__ctz_) / __bits_per_word;
      __first2.__ctz_ = static_cast<unsigned>((__ddn + __first2.__ctz_) % __bits_per_word);
      __dn -= __ddn;
      if (__dn > 0) {
        // Fix: mask exactly the __dn leftover bits that spilled into the next
        // word of the second range (was `__bits_per_word - __n`, which covers
        // bits not yet compared and causes false mismatches; the analogous
        // branch in __copy_unaligned and the last-word branch below both use
        // __dn here).
        __m = std::__trailing_mask<__storage_type>(__bits_per_word - __dn);
        if (static_cast<__storage_type>(*__first2.__seg_ & __m) !=
            static_cast<__storage_type>(__b >> (__first1.__ctz_ + __ddn)))
          return false;
        __first2.__ctz_ = static_cast<unsigned>(__dn);
      }
      ++__first1.__seg_;
      // __first1.__ctz_ = 0;
    }
    // __first1.__ctz_ == 0;
    // do middle words: each full word of the first range is compared against
    // the high part of the current and the low part of the next second-range word.
    unsigned __clz_r   = __bits_per_word - __first2.__ctz_;
    __storage_type __m = std::__leading_mask<__storage_type>(__first2.__ctz_);
    for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first1.__seg_) {
      __storage_type __b = *__first1.__seg_;
      if (static_cast<__storage_type>(*__first2.__seg_ & __m) != static_cast<__storage_type>(__b << __first2.__ctz_))
        return false;
      ++__first2.__seg_;
      if (static_cast<__storage_type>(*__first2.__seg_ & static_cast<__storage_type>(~__m)) !=
          static_cast<__storage_type>(__b >> __clz_r))
        return false;
    }
    // do last word: the final partial word of the first range.
    if (__n > 0) {
      __m                = std::__trailing_mask<__storage_type>(__bits_per_word - __n);
      __storage_type __b = *__first1.__seg_ & __m;
      __storage_type __dn = std::min(__n, static_cast<difference_type>(__clz_r));
      __m = std::__middle_mask<__storage_type>(__clz_r - __dn, __first2.__ctz_);
      if (static_cast<__storage_type>(*__first2.__seg_ & __m) != static_cast<__storage_type>(__b << __first2.__ctz_))
        return false;
      __first2.__seg_ += (__dn + __first2.__ctz_) / __bits_per_word;
      __first2.__ctz_ = static_cast<unsigned>((__dn + __first2.__ctz_) % __bits_per_word);
      __n -= __dn;
      if (__n > 0) {
        __m = std::__trailing_mask<__storage_type>(__bits_per_word - __n);
        if (static_cast<__storage_type>(*__first2.__seg_ & __m) != static_cast<__storage_type>(__b >> __dn))
          return false;
      }
    }
  }
  return true;
}
// Compares the bit range [__first1, __last1) against the range starting at
// __first2 when both ranges share the same bit offset, so whole middle words
// can be compared directly; only the partial first and last words are masked.
// Returns true iff all bits compare equal.
template <class _Cp, bool _IsConst1, bool _IsConst2>
[[__nodiscard__]] _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI bool
__equal_aligned(__bit_iterator<_Cp, _IsConst1> __first1,
                __bit_iterator<_Cp, _IsConst1> __last1,
                __bit_iterator<_Cp, _IsConst2> __first2) {
  using _It             = __bit_iterator<_Cp, _IsConst1>;
  using difference_type = typename _It::difference_type;
  using __storage_type  = typename _It::__storage_type;

  const int __bits_per_word = _It::__bits_per_word;
  difference_type __n       = __last1 - __first1;
  if (__n > 0) {
    // do first word: compare only the in-range bits of the partial word.
    if (__first1.__ctz_ != 0) {
      unsigned __clz       = __bits_per_word - __first1.__ctz_;
      difference_type __dn = std::min(static_cast<difference_type>(__clz), __n);
      __n -= __dn;
      __storage_type __m = std::__middle_mask<__storage_type>(__clz - __dn, __first1.__ctz_);
      if ((*__first2.__seg_ & __m) != (*__first1.__seg_ & __m))
        return false;
      ++__first2.__seg_;
      ++__first1.__seg_;
      // __first1.__ctz_ = 0;
      // __first2.__ctz_ = 0;
    }
    // __first1.__ctz_ == 0;
    // __first2.__ctz_ == 0;
    // do middle words: full words compare directly, no masking needed.
    for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first1.__seg_, ++__first2.__seg_)
      if (*__first2.__seg_ != *__first1.__seg_)
        return false;
    // do last word: compare the remaining __n low bits.
    if (__n > 0) {
      __storage_type __m = std::__trailing_mask<__storage_type>(__bits_per_word - __n);
      if ((*__first2.__seg_ & __m) != (*__first1.__seg_ & __m))
        return false;
    }
  }
  return true;
}
// Bit-iterator overload of the iterator-based equal dispatcher, enabled only
// when the predicate desugars to operator== on bool. Selects the word-aligned
// comparison when both ranges start at the same bit offset, and the shifting
// comparison otherwise.
template <class _Cp,
          bool _IsConst1,
          bool _IsConst2,
          class _BinaryPredicate,
          __enable_if_t<__desugars_to_v<__equal_tag, _BinaryPredicate, bool, bool>, int> = 0>
[[__nodiscard__]] inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __equal_iter_impl(
    __bit_iterator<_Cp, _IsConst1> __first1,
    __bit_iterator<_Cp, _IsConst1> __last1,
    __bit_iterator<_Cp, _IsConst2> __first2,
    _BinaryPredicate) {
  return __first1.__ctz_ == __first2.__ctz_ ? std::__equal_aligned(__first1, __last1, __first2)
                                            : std::__equal_unaligned(__first1, __last1, __first2);
}
template <class _InputIterator1, class _InputIterator2, class _BinaryPredicate>
[[__nodiscard__]] inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __equal_iter_impl(
_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _BinaryPredicate& __pred) {
@ -94,6 +232,28 @@ __equal_impl(_Tp* __first1, _Tp* __last1, _Up* __first2, _Up*, _Pred&, _Proj1&,
return std::__constexpr_memcmp_equal(__first1, __first2, __element_count(__last1 - __first1));
}
// Bit-iterator overload of the projection-based equal dispatcher, enabled only
// when the predicate desugars to operator== and both projections are the
// identity. Chooses between the aligned and unaligned bitwise comparisons
// based on the starting bit offsets of the two ranges.
template <class _Cp,
          bool _IsConst1,
          bool _IsConst2,
          class _Pred,
          class _Proj1,
          class _Proj2,
          __enable_if_t<__desugars_to_v<__equal_tag, _Pred, bool, bool> && __is_identity<_Proj1>::value &&
                            __is_identity<_Proj2>::value,
                        int> = 0>
[[__nodiscard__]] inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __equal_impl(
    __bit_iterator<_Cp, _IsConst1> __first1,
    __bit_iterator<_Cp, _IsConst1> __last1,
    __bit_iterator<_Cp, _IsConst2> __first2,
    __bit_iterator<_Cp, _IsConst2>,
    _Pred&,
    _Proj1&,
    _Proj2&) {
  return __first1.__ctz_ == __first2.__ctz_ ? std::__equal_aligned(__first1, __last1, __first2)
                                            : std::__equal_unaligned(__first1, __last1, __first2);
}
template <class _InputIterator1, class _InputIterator2, class _BinaryPredicate>
[[__nodiscard__]] inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool
equal(_InputIterator1 __first1,

View File

@ -41,11 +41,7 @@ __fill_n_bool(__bit_iterator<_Cp, false> __first, typename __size_difference_typ
if (__first.__ctz_ != 0) {
__storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_);
__storage_type __dn = std::min(__clz_f, __n);
__storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn));
if (_FillVal)
*__first.__seg_ |= __m;
else
*__first.__seg_ &= ~__m;
std::__fill_masked_range(std::__to_address(__first.__seg_), __clz_f - __dn, __first.__ctz_, _FillVal);
__n -= __dn;
++__first.__seg_;
}
@ -56,11 +52,7 @@ __fill_n_bool(__bit_iterator<_Cp, false> __first, typename __size_difference_typ
// do last partial word
if (__n > 0) {
__first.__seg_ += __nw;
__storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n);
if (_FillVal)
*__first.__seg_ |= __m;
else
*__first.__seg_ &= ~__m;
std::__fill_masked_range(std::__to_address(__first.__seg_), __bits_per_word - __n, 0u, _FillVal);
}
}

View File

@ -106,10 +106,10 @@ __find_bool(__bit_iterator<_Cp, _IsConst> __first, typename __size_difference_ty
if (__first.__ctz_ != 0) {
__storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_);
__storage_type __dn = std::min(__clz_f, __n);
__storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn));
__storage_type __m = std::__middle_mask<__storage_type>(__clz_f - __dn, __first.__ctz_);
__storage_type __b = std::__invert_if<!_ToFind>(*__first.__seg_) & __m;
if (__b)
return _It(__first.__seg_, static_cast<unsigned>(std::__libcpp_ctz(__b)));
return _It(__first.__seg_, static_cast<unsigned>(std::__countr_zero(__b)));
if (__n == __dn)
return __first + __n;
__n -= __dn;
@ -119,14 +119,14 @@ __find_bool(__bit_iterator<_Cp, _IsConst> __first, typename __size_difference_ty
for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) {
__storage_type __b = std::__invert_if<!_ToFind>(*__first.__seg_);
if (__b)
return _It(__first.__seg_, static_cast<unsigned>(std::__libcpp_ctz(__b)));
return _It(__first.__seg_, static_cast<unsigned>(std::__countr_zero(__b)));
}
// do last partial word
if (__n > 0) {
__storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n);
__storage_type __m = std::__trailing_mask<__storage_type>(__bits_per_word - __n);
__storage_type __b = std::__invert_if<!_ToFind>(*__first.__seg_) & __m;
if (__b)
return _It(__first.__seg_, static_cast<unsigned>(std::__libcpp_ctz(__b)));
return _It(__first.__seg_, static_cast<unsigned>(std::__countr_zero(__b)));
}
return _It(__first.__seg_, static_cast<unsigned>(__n));
}

View File

@ -12,9 +12,10 @@
#include <__algorithm/for_each_segment.h>
#include <__config>
#include <__functional/identity.h>
#include <__iterator/segmented_iterator.h>
#include <__ranges/movable_box.h>
#include <__utility/in_place.h>
#include <__type_traits/enable_if.h>
#include <__type_traits/invoke.h>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@ -26,28 +27,36 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
template <class _InputIterator, class _Function>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Function
for_each(_InputIterator __first, _InputIterator __last, _Function __f) {
template <class _InputIterator, class _Sent, class _Func, class _Proj>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator
__for_each(_InputIterator __first, _Sent __last, _Func& __f, _Proj& __proj) {
for (; __first != __last; ++__first)
__f(*__first);
return __f;
std::__invoke(__f, std::__invoke(__proj, *__first));
return __first;
}
// __movable_box is available in C++20, but is actually a copyable-box, so optimization is only correct in C++23
#if _LIBCPP_STD_VER >= 23
template <class _SegmentedIterator, class _Function>
requires __is_segmented_iterator<_SegmentedIterator>::value
_LIBCPP_HIDE_FROM_ABI constexpr _Function
for_each(_SegmentedIterator __first, _SegmentedIterator __last, _Function __func) {
ranges::__movable_box<_Function> __wrapped_func(in_place, std::move(__func));
std::__for_each_segment(__first, __last, [&](auto __lfirst, auto __llast) {
__wrapped_func =
ranges::__movable_box<_Function>(in_place, std::for_each(__lfirst, __llast, std::move(*__wrapped_func)));
#ifndef _LIBCPP_CXX03_LANG
template <class _SegmentedIterator,
class _Func,
class _Proj,
__enable_if_t<__is_segmented_iterator<_SegmentedIterator>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _SegmentedIterator
__for_each(_SegmentedIterator __first, _SegmentedIterator __last, _Func& __func, _Proj& __proj) {
using __local_iterator_t = typename __segmented_iterator_traits<_SegmentedIterator>::__local_iterator;
std::__for_each_segment(__first, __last, [&](__local_iterator_t __lfirst, __local_iterator_t __llast) {
std::__for_each(__lfirst, __llast, __func, __proj);
});
return std::move(*__wrapped_func);
return __last;
}
#endif // !_LIBCPP_CXX03_LANG
template <class _InputIterator, class _Func>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Func
for_each(_InputIterator __first, _InputIterator __last, _Func __f) {
__identity __proj;
std::__for_each(__first, __last, __f, __proj);
return __f;
}
#endif // _LIBCPP_STD_VER >= 23
_LIBCPP_END_NAMESPACE_STD

View File

@ -10,32 +10,93 @@
#ifndef _LIBCPP___ALGORITHM_FOR_EACH_N_H
#define _LIBCPP___ALGORITHM_FOR_EACH_N_H
#include <__algorithm/for_each.h>
#include <__algorithm/for_each_n_segment.h>
#include <__config>
#include <__functional/identity.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/segmented_iterator.h>
#include <__type_traits/disjunction.h>
#include <__type_traits/enable_if.h>
#include <__type_traits/invoke.h>
#include <__type_traits/negation.h>
#include <__utility/convert_to_integral.h>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
_LIBCPP_PUSH_MACROS
#include <__undef_macros>
_LIBCPP_BEGIN_NAMESPACE_STD
#if _LIBCPP_STD_VER >= 17
template <class _InputIterator, class _Size, class _Function>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator
for_each_n(_InputIterator __first, _Size __orig_n, _Function __f) {
template <class _InputIterator,
class _Size,
class _Func,
class _Proj,
__enable_if_t<!__has_random_access_iterator_category<_InputIterator>::value &&
_Or< _Not<__is_segmented_iterator<_InputIterator> >,
_Not<__has_random_access_local_iterator<_InputIterator> > >::value,
int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator
__for_each_n(_InputIterator __first, _Size __orig_n, _Func& __f, _Proj& __proj) {
typedef decltype(std::__convert_to_integral(__orig_n)) _IntegralSize;
_IntegralSize __n = __orig_n;
while (__n > 0) {
__f(*__first);
std::__invoke(__f, std::__invoke(__proj, *__first));
++__first;
--__n;
}
return __first;
return std::move(__first);
}
#endif
// Random-access overload: the end iterator is computable in O(1), so the
// counted loop is delegated to the range-based __for_each and the end
// iterator is returned directly.
template <class _RandIter,
          class _Size,
          class _Func,
          class _Proj,
          __enable_if_t<__has_random_access_iterator_category<_RandIter>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _RandIter
__for_each_n(_RandIter __first, _Size __orig_n, _Func& __f, _Proj& __proj) {
  typename std::iterator_traits<_RandIter>::difference_type __count = __orig_n;
  _RandIter __last = __first + __count;
  std::__for_each(__first, __last, __f, __proj);
  return __last;
}
#ifndef _LIBCPP_CXX03_LANG
// Segmented overload (e.g. deque iterators): walk the counted range segment
// by segment, running the plain loop over each contiguous local sub-range.
template <class _SegmentedIterator,
          class _Size,
          class _Func,
          class _Proj,
          __enable_if_t<!__has_random_access_iterator_category<_SegmentedIterator>::value &&
                            __is_segmented_iterator<_SegmentedIterator>::value &&
                            __has_random_access_iterator_category<
                                typename __segmented_iterator_traits<_SegmentedIterator>::__local_iterator>::value,
                        int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _SegmentedIterator
__for_each_n(_SegmentedIterator __first, _Size __orig_n, _Func& __f, _Proj& __proj) {
  using __local_iterator_t = typename __segmented_iterator_traits<_SegmentedIterator>::__local_iterator;
  auto __run_segment = [&](__local_iterator_t __seg_first, __local_iterator_t __seg_last) {
    std::__for_each(__seg_first, __seg_last, __f, __proj);
  };
  return std::__for_each_n_segment(__first, __orig_n, __run_segment);
}
#endif // !_LIBCPP_CXX03_LANG
#if _LIBCPP_STD_VER >= 17
// for_each_n(first, n, f): applies f to the first n elements of the range starting at first
// and returns the iterator past the last element visited. Dispatches to the internal
// __for_each_n overloads using an identity projection.
template <class _InputIterator, class _Size, class _Func>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator
for_each_n(_InputIterator __first, _Size __orig_n, _Func __f) {
  __identity __proj; // the public std:: interface has no projection
  return std::__for_each_n(__first, __orig_n, __f, __proj);
}
#endif // _LIBCPP_STD_VER >= 17
_LIBCPP_END_NAMESPACE_STD
_LIBCPP_POP_MACROS
#endif // _LIBCPP___ALGORITHM_FOR_EACH_N_H

View File

@ -0,0 +1,63 @@
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _LIBCPP___ALGORITHM_FOR_EACH_N_SEGMENT_H
#define _LIBCPP___ALGORITHM_FOR_EACH_N_SEGMENT_H
#include <__config>
#include <__iterator/iterator_traits.h>
#include <__iterator/segmented_iterator.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
_LIBCPP_BEGIN_NAMESPACE_STD
// __for_each_n_segment optimizes linear iteration over segmented iterators. It processes a segmented
// input range [__first, __first + __n) by applying the functor __func to each element within the segment.
// The return value of __func is ignored, and the function returns an iterator pointing to one past the
// last processed element in the input range.
// __for_each_n_segment optimizes linear iteration over segmented iterators. It processes a segmented
// input range [__first, __first + __orig_n) by invoking __func on each segment's local sub-range.
// The return value of __func is ignored, and the function returns an iterator pointing to one past
// the last processed element of the input range.
template <class _SegmentedIterator, class _Size, class _Functor>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _SegmentedIterator
__for_each_n_segment(_SegmentedIterator __first, _Size __orig_n, _Functor __func) {
  static_assert(__is_segmented_iterator<_SegmentedIterator>::value &&
                    __has_random_access_iterator_category<
                        typename __segmented_iterator_traits<_SegmentedIterator>::__local_iterator>::value,
                "__for_each_n_segment only works with segmented iterators with random-access local iterators");
  if (__orig_n <= 0)
    return __first;

  using _Traits        = __segmented_iterator_traits<_SegmentedIterator>;
  using __local_iter_t = typename _Traits::__local_iterator;
  using __difference_t = typename std::iterator_traits<__local_iter_t>::difference_type;

  __difference_t __remaining = __orig_n;
  auto __seg                 = _Traits::__segment(__first);
  auto __lbegin              = _Traits::__local(__first);
  __local_iter_t __lend;
  // Invariant: __remaining > 0 on every loop entry (checked above, re-established in the else path).
  while (true) {
    __lend          = _Traits::__end(__seg);
    auto __seg_size = __lend - __lbegin;
    if (__remaining <= __seg_size) {
      // Final (possibly partial) segment: stop exactly after __remaining elements.
      __lend = __lbegin + __remaining;
      __func(__lbegin, __lend);
      break;
    }
    __func(__lbegin, __lend);
    __remaining -= __seg_size;
    __lbegin = _Traits::__begin(++__seg);
  }
  return _Traits::__compose(__seg, __lend);
}
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___ALGORITHM_FOR_EACH_N_SEGMENT_H

View File

@ -22,6 +22,7 @@
#include <__functional/identity.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/reverse_iterator.h>
#include <__memory/construct_at.h>
#include <__memory/destruct_n.h>
#include <__memory/unique_ptr.h>
#include <__memory/unique_temporary_buffer.h>
@ -106,13 +107,13 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void __buffered_inplace_merg
value_type* __p = __buff;
for (_BidirectionalIterator __i = __first; __i != __middle;
__d.template __incr<value_type>(), (void)++__i, (void)++__p)
::new ((void*)__p) value_type(_IterOps<_AlgPolicy>::__iter_move(__i));
std::__construct_at(__p, _IterOps<_AlgPolicy>::__iter_move(__i));
std::__half_inplace_merge<_AlgPolicy>(__buff, __p, __middle, __last, __first, __comp);
} else {
value_type* __p = __buff;
for (_BidirectionalIterator __i = __middle; __i != __last;
__d.template __incr<value_type>(), (void)++__i, (void)++__p)
::new ((void*)__p) value_type(_IterOps<_AlgPolicy>::__iter_move(__i));
std::__construct_at(__p, _IterOps<_AlgPolicy>::__iter_move(__i));
typedef reverse_iterator<_BidirectionalIterator> _RBi;
typedef reverse_iterator<value_type*> _Rv;
typedef __invert<_Compare> _Inverted;
@ -203,7 +204,7 @@ _LIBCPP_CONSTEXPR_SINCE_CXX26 void __inplace_merge(
}
template <class _AlgPolicy, class _BidirectionalIterator, class _Compare>
_LIBCPP_HIDE_FROM_ABI void __inplace_merge(
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void __inplace_merge(
_BidirectionalIterator __first, _BidirectionalIterator __middle, _BidirectionalIterator __last, _Compare&& __comp) {
typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type;
typedef typename iterator_traits<_BidirectionalIterator>::difference_type difference_type;
@ -223,14 +224,14 @@ _LIBCPP_HIDE_FROM_ABI void __inplace_merge(
}
// inplace_merge(first, middle, last, comp):
// Merges the two consecutive sorted ranges [first, middle) and [middle, last) into one sorted
// range [first, last), ordered by comp. Forwards to the internal __inplace_merge with the
// classic (non-ranges) algorithm policy; the comparator is passed as a reference wrapper type.
template <class _BidirectionalIterator, class _Compare>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void inplace_merge(
    _BidirectionalIterator __first, _BidirectionalIterator __middle, _BidirectionalIterator __last, _Compare __comp) {
  std::__inplace_merge<_ClassicAlgPolicy>(
      std::move(__first), std::move(__middle), std::move(__last), static_cast<__comp_ref_type<_Compare> >(__comp));
}
// inplace_merge(first, middle, last): as above, using the default less-than ordering.
template <class _BidirectionalIterator>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 void
inplace_merge(_BidirectionalIterator __first, _BidirectionalIterator __middle, _BidirectionalIterator __last) {
  std::inplace_merge(std::move(__first), std::move(__middle), std::move(__last), __less<>());
}

View File

@ -29,7 +29,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _Comp, class _Iter, class _Sent, class _Proj>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Iter
__min_element(_Iter __first, _Sent __last, _Comp __comp, _Proj& __proj) {
__min_element(_Iter __first, _Sent __last, _Comp& __comp, _Proj& __proj) {
if (__first == __last)
return __first;

View File

@ -9,11 +9,13 @@
#ifndef _LIBCPP___ALGORITHM_MOVE_H
#define _LIBCPP___ALGORITHM_MOVE_H
#include <__algorithm/copy.h>
#include <__algorithm/copy_move_common.h>
#include <__algorithm/for_each_segment.h>
#include <__algorithm/iterator_operations.h>
#include <__algorithm/min.h>
#include <__config>
#include <__fwd/bit_reference.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/segmented_iterator.h>
#include <__type_traits/common_type.h>
@ -98,6 +100,14 @@ struct __move_impl {
}
}
// Overload for packed bit iterators (__bit_iterator): moving bits is indistinguishable from
// copying them, so delegate directly to the bitwise __copy.
template <class _Cp, bool _IsConst>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<__bit_iterator<_Cp, _IsConst>, __bit_iterator<_Cp, false> >
operator()(__bit_iterator<_Cp, _IsConst> __first,
           __bit_iterator<_Cp, _IsConst> __last,
           __bit_iterator<_Cp, false> __result) {
  return std::__copy(__first, __last, __result);
}
// At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer.
template <class _In, class _Out, __enable_if_t<__can_lower_move_assignment_to_memmove<_In, _Out>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*>

View File

@ -9,10 +9,12 @@
#ifndef _LIBCPP___ALGORITHM_MOVE_BACKWARD_H
#define _LIBCPP___ALGORITHM_MOVE_BACKWARD_H
#include <__algorithm/copy_backward.h>
#include <__algorithm/copy_move_common.h>
#include <__algorithm/iterator_operations.h>
#include <__algorithm/min.h>
#include <__config>
#include <__fwd/bit_reference.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/segmented_iterator.h>
#include <__type_traits/common_type.h>
@ -107,6 +109,14 @@ struct __move_backward_impl {
}
}
// Overload for packed bit iterators (__bit_iterator): moving bits is indistinguishable from
// copying them, so delegate to the bitwise __copy_backward under the classic algorithm policy.
template <class _Cp, bool _IsConst>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<__bit_iterator<_Cp, _IsConst>, __bit_iterator<_Cp, false> >
operator()(__bit_iterator<_Cp, _IsConst> __first,
           __bit_iterator<_Cp, _IsConst> __last,
           __bit_iterator<_Cp, false> __result) {
  return std::__copy_backward<_ClassicAlgPolicy>(__first, __last, __result);
}
// At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer.
template <class _In, class _Out, __enable_if_t<__can_lower_move_assignment_to_memmove<_In, _Out>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*>

View File

@ -0,0 +1,56 @@
// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _LIBCPP___ALGORITHM_OUT_VALUE_RESULT_H
#define _LIBCPP___ALGORITHM_OUT_VALUE_RESULT_H
#include <__concepts/convertible_to.h>
#include <__config>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
_LIBCPP_PUSH_MACROS
#include <__undef_macros>
_LIBCPP_BEGIN_NAMESPACE_STD
#if _LIBCPP_STD_VER >= 23
namespace ranges {
// ranges::out_value_result<O, T>: aggregate returned by ranges algorithms that yield both an
// output position and a value.
template <class _OutIter1, class _ValType1>
struct out_value_result {
  _LIBCPP_NO_UNIQUE_ADDRESS _OutIter1 out;   // output iterator (position after the last write)
  _LIBCPP_NO_UNIQUE_ADDRESS _ValType1 value; // the accompanying value

  // Converting copy: allowed when both members convert from const lvalues.
  template <class _OutIter2, class _ValType2>
    requires convertible_to<const _OutIter1&, _OutIter2> && convertible_to<const _ValType1&, _ValType2>
  _LIBCPP_HIDE_FROM_ABI constexpr operator out_value_result<_OutIter2, _ValType2>() const& {
    return {out, value};
  }

  // Converting move: allowed when both members convert from rvalues.
  template <class _OutIter2, class _ValType2>
    requires convertible_to<_OutIter1, _OutIter2> && convertible_to<_ValType1, _ValType2>
  _LIBCPP_HIDE_FROM_ABI constexpr operator out_value_result<_OutIter2, _ValType2>() && {
    return {std::move(out), std::move(value)};
  }
};
} // namespace ranges
#endif // _LIBCPP_STD_VER >= 23
_LIBCPP_END_NAMESPACE_STD
_LIBCPP_POP_MACROS
#endif // _LIBCPP___ALGORITHM_OUT_VALUE_RESULT_H

View File

@ -29,10 +29,12 @@
#include <__algorithm/for_each.h>
#include <__algorithm/move.h>
#include <__bit/bit_cast.h>
#include <__bit/bit_log2.h>
#include <__bit/countl.h>
#include <__config>
#include <__cstddef/size_t.h>
#include <__functional/identity.h>
#include <__iterator/access.h>
#include <__iterator/distance.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/move_iterator.h>
@ -43,9 +45,12 @@
#include <__type_traits/enable_if.h>
#include <__type_traits/invoke.h>
#include <__type_traits/is_assignable.h>
#include <__type_traits/is_enum.h>
#include <__type_traits/is_integral.h>
#include <__type_traits/is_unsigned.h>
#include <__type_traits/make_unsigned.h>
#include <__type_traits/void_t.h>
#include <__utility/declval.h>
#include <__utility/forward.h>
#include <__utility/integer_sequence.h>
#include <__utility/move.h>
@ -67,7 +72,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
#if _LIBCPP_STD_VER >= 14
template <class _InputIterator, class _OutputIterator>
_LIBCPP_HIDE_FROM_ABI pair<_OutputIterator, __iter_value_type<_InputIterator>>
_LIBCPP_HIDE_FROM_ABI constexpr pair<_OutputIterator, __iter_value_type<_InputIterator>>
__partial_sum_max(_InputIterator __first, _InputIterator __last, _OutputIterator __result) {
if (__first == __last)
return {__result, 0};
@ -109,7 +114,7 @@ struct __counting_sort_traits {
};
template <class _Radix, class _Integer>
_LIBCPP_HIDE_FROM_ABI auto __nth_radix(size_t __radix_number, _Radix __radix, _Integer __n) {
_LIBCPP_HIDE_FROM_ABI constexpr auto __nth_radix(size_t __radix_number, _Radix __radix, _Integer __n) {
static_assert(is_unsigned<_Integer>::value);
using __traits = __counting_sort_traits<_Integer, _Radix>;
@ -117,7 +122,7 @@ _LIBCPP_HIDE_FROM_ABI auto __nth_radix(size_t __radix_number, _Radix __radix, _I
}
template <class _ForwardIterator, class _Map, class _RandomAccessIterator>
_LIBCPP_HIDE_FROM_ABI void
_LIBCPP_HIDE_FROM_ABI constexpr void
__collect(_ForwardIterator __first, _ForwardIterator __last, _Map __map, _RandomAccessIterator __counters) {
using __value_type = __iter_value_type<_ForwardIterator>;
using __traits = __counting_sort_traits<__value_type, _Map>;
@ -129,7 +134,7 @@ __collect(_ForwardIterator __first, _ForwardIterator __last, _Map __map, _Random
}
template <class _ForwardIterator, class _RandomAccessIterator1, class _Map, class _RandomAccessIterator2>
_LIBCPP_HIDE_FROM_ABI void
_LIBCPP_HIDE_FROM_ABI constexpr void
__dispose(_ForwardIterator __first,
_ForwardIterator __last,
_RandomAccessIterator1 __result,
@ -147,7 +152,7 @@ template <class _ForwardIterator,
class _RandomAccessIterator1,
class _RandomAccessIterator2,
size_t... _Radices>
_LIBCPP_HIDE_FROM_ABI bool __collect_impl(
_LIBCPP_HIDE_FROM_ABI constexpr bool __collect_impl(
_ForwardIterator __first,
_ForwardIterator __last,
_Map __map,
@ -177,7 +182,7 @@ _LIBCPP_HIDE_FROM_ABI bool __collect_impl(
}
template <class _ForwardIterator, class _Map, class _Radix, class _RandomAccessIterator1, class _RandomAccessIterator2>
_LIBCPP_HIDE_FROM_ABI bool
_LIBCPP_HIDE_FROM_ABI constexpr bool
__collect(_ForwardIterator __first,
_ForwardIterator __last,
_Map __map,
@ -191,7 +196,7 @@ __collect(_ForwardIterator __first,
}
template <class _BidirectionalIterator, class _RandomAccessIterator1, class _Map, class _RandomAccessIterator2>
_LIBCPP_HIDE_FROM_ABI void __dispose_backward(
_LIBCPP_HIDE_FROM_ABI constexpr void __dispose_backward(
_BidirectionalIterator __first,
_BidirectionalIterator __last,
_RandomAccessIterator1 __result,
@ -206,7 +211,7 @@ _LIBCPP_HIDE_FROM_ABI void __dispose_backward(
}
template <class _ForwardIterator, class _RandomAccessIterator, class _Map>
_LIBCPP_HIDE_FROM_ABI _RandomAccessIterator
_LIBCPP_HIDE_FROM_ABI constexpr _RandomAccessIterator
__counting_sort_impl(_ForwardIterator __first, _ForwardIterator __last, _RandomAccessIterator __result, _Map __map) {
using __value_type = __iter_value_type<_ForwardIterator>;
using __traits = __counting_sort_traits<__value_type, _Map>;
@ -225,7 +230,7 @@ template <class _RandomAccessIterator1,
class _Radix,
enable_if_t< __radix_sort_traits<__iter_value_type<_RandomAccessIterator1>, _Map, _Radix>::__radix_count == 1,
int> = 0>
_LIBCPP_HIDE_FROM_ABI void __radix_sort_impl(
_LIBCPP_HIDE_FROM_ABI constexpr void __radix_sort_impl(
_RandomAccessIterator1 __first,
_RandomAccessIterator1 __last,
_RandomAccessIterator2 __buffer,
@ -245,7 +250,7 @@ template <
class _Radix,
enable_if_t< __radix_sort_traits<__iter_value_type<_RandomAccessIterator1>, _Map, _Radix>::__radix_count % 2 == 0,
int> = 0 >
_LIBCPP_HIDE_FROM_ABI void __radix_sort_impl(
_LIBCPP_HIDE_FROM_ABI constexpr void __radix_sort_impl(
_RandomAccessIterator1 __first,
_RandomAccessIterator1 __last,
_RandomAccessIterator2 __buffer_begin,
@ -297,6 +302,96 @@ _LIBCPP_HIDE_FROM_ABI constexpr auto __shift_to_unsigned(_Ip __n) {
return static_cast<make_unsigned_t<_Ip> >(__n ^ __min_value);
}
// __unsigned_integer_of_size<N>::type is the fixed-width unsigned integer type exactly N bytes
// wide; only the sizes specialized below are supported. Used to reinterpret scalar values as
// unsigned bit patterns.
template <size_t _Size>
struct __unsigned_integer_of_size;

template <>
struct __unsigned_integer_of_size<1> {
  using type _LIBCPP_NODEBUG = uint8_t;
};

template <>
struct __unsigned_integer_of_size<2> {
  using type _LIBCPP_NODEBUG = uint16_t;
};

template <>
struct __unsigned_integer_of_size<4> {
  using type _LIBCPP_NODEBUG = uint32_t;
};

template <>
struct __unsigned_integer_of_size<8> {
  using type _LIBCPP_NODEBUG = uint64_t;
};

// 16-byte support only where the platform provides __int128.
#  if _LIBCPP_HAS_INT128
template <>
struct __unsigned_integer_of_size<16> {
  using type _LIBCPP_NODEBUG = unsigned __int128;
};
#  endif

template <size_t _Size>
using __unsigned_integer_of_size_t _LIBCPP_NODEBUG = typename __unsigned_integer_of_size<_Size>::type;

// The unsigned integer type with the same size as _Sc.
template <class _Sc>
using __unsigned_representation_for_t _LIBCPP_NODEBUG = __unsigned_integer_of_size_t<sizeof(_Sc)>;
// The function `__to_ordered_integral` is defined for integers and IEEE 754 floating-point numbers.
// Returns an integer representation such that for any `x` and `y` such that `x < y`, the expression
// `__to_ordered_integral(x) < __to_ordered_integral(y)` is true, where `x`, `y` are integers or IEEE 754 floats.
template <class _Integral, enable_if_t< is_integral<_Integral>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI constexpr auto __to_ordered_integral(_Integral __n) {
  // Integers already order correctly under <, so they are returned unchanged.
  return __n;
}
// An overload for IEEE 754 floating-point numbers
// For the floats conforming to IEEE 754 (IEC 559) standard, we know that:
// 1. The bit representation of positive floats directly reflects their order:
// When comparing floats by magnitude, the number with the larger exponent is greater, and if the exponents are
// equal, the one with the larger mantissa is greater.
// 2. The bit representation of negative floats reflects their reverse order (for the same reasons).
// 3. The most significant bit (sign bit) is zero for positive floats and one for negative floats. Therefore, in the raw
// bit representation, any negative number will be greater than any positive number.
// The only exception from this rule is `NaN`, which is unordered by definition.
// Based on the above, to obtain correctly ordered integral representation of floating-point numbers, we need to:
// 1. Invert the bit representation (including the sign bit) of negative floats to switch from reverse order to direct
// order;
// 2. Invert the sign bit for positive floats.
// Thus, in final integral representation, we have reversed the order for negative floats and made all negative floats
// smaller than all positive numbers (by inverting the sign bit).
// Maps an IEC 559 float to an unsigned integer whose natural ordering matches the float
// ordering: negative values get all bits flipped (reversing their order), positive values get
// only the sign bit flipped (placing them above all negatives).
template <class _Floating, enable_if_t< numeric_limits<_Floating>::is_iec559, int> = 0>
_LIBCPP_HIDE_FROM_ABI constexpr auto __to_ordered_integral(_Floating __f) {
  using __integral_type          = __unsigned_representation_for_t<_Floating>;
  constexpr auto __bit_count     = std::numeric_limits<__integral_type>::digits;
  constexpr auto __sign_bit_mask = static_cast<__integral_type>(__integral_type{1} << (__bit_count - 1));

  const auto __bits        = std::__bit_cast<__integral_type>(__f);
  const bool __is_negative = (__bits & __sign_bit_mask) != 0;
  return static_cast<__integral_type>(__is_negative ? ~__bits : (__bits ^ __sign_bit_mask));
}
// There may exist user-defined comparison for enum, so we cannot compare enums just like integers.
// Deleted so that callers fail to compile instead of silently bypassing the user's ordering.
template <class _Enum, enable_if_t< is_enum<_Enum>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI constexpr auto __to_ordered_integral(_Enum __e) = delete;

// `long double` varies significantly across platforms and compilers, making it practically
// impossible to determine its actual bit width for conversion to an ordered integer.
// Deleted for the same compile-time-failure reason as the enum overload.
inline _LIBCPP_HIDE_FROM_ABI constexpr auto __to_ordered_integral(long double) = delete;
// __is_ordered_integer_representable_v<T> is true iff __to_ordered_integral is callable with a T,
// detected via the SFINAE-friendly __void_t partial specialization below.
template <class _Tp, class = void>
inline const bool __is_ordered_integer_representable_v = false;

template <class _Tp>
inline const bool
    __is_ordered_integer_representable_v<_Tp, __void_t<decltype(std::__to_ordered_integral(std::declval<_Tp>()))>> =
        true;
struct __low_byte_fn {
template <class _Ip>
_LIBCPP_HIDE_FROM_ABI constexpr uint8_t operator()(_Ip __integer) const {
@ -307,18 +402,20 @@ struct __low_byte_fn {
};
// Radix-sorts [__first, __last) using __buffer as scratch space. Each element is first mapped
// through __map, converted to an order-preserving unsigned integer, and then sorted digit by
// digit as extracted by __radix.
template <class _RandomAccessIterator1, class _RandomAccessIterator2, class _Map, class _Radix>
_LIBCPP_HIDE_FROM_ABI constexpr void
__radix_sort(_RandomAccessIterator1 __first,
             _RandomAccessIterator1 __last,
             _RandomAccessIterator2 __buffer,
             _Map __map,
             _Radix __radix) {
  // Compose: map to key, make the key's ordering integral, then shift into unsigned space.
  auto __map_to_unsigned = [__map = std::move(__map)](const auto& __x) {
    return std::__shift_to_unsigned(__map(std::__to_ordered_integral(__x)));
  };
  std::__radix_sort_impl(__first, __last, __buffer, __map_to_unsigned, __radix);
}
// Convenience overload: radix-sorts [__first, __last) with the identity mapping and the
// low-byte digit extractor.
template <class _RandomAccessIterator1, class _RandomAccessIterator2>
_LIBCPP_HIDE_FROM_ABI constexpr void
__radix_sort(_RandomAccessIterator1 __first, _RandomAccessIterator1 __last, _RandomAccessIterator2 __buffer) {
  std::__radix_sort(__first, __last, __buffer, __identity{}, __low_byte_fn{});
}

View File

@ -9,10 +9,12 @@
#ifndef _LIBCPP___ALGORITHM_RANGES_FOR_EACH_H
#define _LIBCPP___ALGORITHM_RANGES_FOR_EACH_H
#include <__algorithm/for_each.h>
#include <__algorithm/for_each_n.h>
#include <__algorithm/in_fun_result.h>
#include <__concepts/assignable.h>
#include <__config>
#include <__functional/identity.h>
#include <__functional/invoke.h>
#include <__iterator/concepts.h>
#include <__iterator/projected.h>
#include <__ranges/access.h>
@ -41,9 +43,17 @@ private:
// Applies __func (through __proj) to every element of [__first, __last) and returns the
// past-the-end iterator together with the (moved) functor.
template <class _Iter, class _Sent, class _Proj, class _Func>
_LIBCPP_HIDE_FROM_ABI constexpr static for_each_result<_Iter, _Func>
__for_each_impl(_Iter __first, _Sent __last, _Func& __func, _Proj& __proj) {
  // In the case where we have different iterator and sentinel types, the segmented iterator optimization
  // in std::for_each will not kick in. Therefore, we prefer std::for_each_n in that case (whenever we can
  // obtain the `n`).
  if constexpr (!std::assignable_from<_Iter&, _Sent> && std::sized_sentinel_for<_Sent, _Iter>) {
    auto __n   = __last - __first;
    auto __end = std::__for_each_n(std::move(__first), __n, __func, __proj);
    return {std::move(__end), std::move(__func)};
  } else {
    auto __end = std::__for_each(std::move(__first), std::move(__last), __func, __proj);
    return {std::move(__end), std::move(__func)};
  }
}
public:

View File

@ -9,10 +9,10 @@
#ifndef _LIBCPP___ALGORITHM_RANGES_FOR_EACH_N_H
#define _LIBCPP___ALGORITHM_RANGES_FOR_EACH_N_H
#include <__algorithm/for_each_n.h>
#include <__algorithm/in_fun_result.h>
#include <__config>
#include <__functional/identity.h>
#include <__functional/invoke.h>
#include <__iterator/concepts.h>
#include <__iterator/incrementable_traits.h>
#include <__iterator/iterator_traits.h>
@ -40,11 +40,8 @@ struct __for_each_n {
// ranges::for_each_n: applies __func (through __proj) to the first __count elements starting
// at __first; returns the iterator past the last element visited plus the (moved) functor.
// Delegates to the classic __for_each_n so its iterator-category dispatch is reused.
template <input_iterator _Iter, class _Proj = identity, indirectly_unary_invocable<projected<_Iter, _Proj>> _Func>
_LIBCPP_HIDE_FROM_ABI constexpr for_each_n_result<_Iter, _Func>
operator()(_Iter __first, iter_difference_t<_Iter> __count, _Func __func, _Proj __proj = {}) const {
  auto __last = std::__for_each_n(std::move(__first), __count, __func, __proj);
  return {std::move(__last), std::move(__func)};
}
};

View File

@ -41,7 +41,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
struct __inplace_merge {
template <class _Iter, class _Sent, class _Comp, class _Proj>
_LIBCPP_HIDE_FROM_ABI static constexpr auto
_LIBCPP_HIDE_FROM_ABI static _LIBCPP_CONSTEXPR_SINCE_CXX26 auto
__inplace_merge_impl(_Iter __first, _Iter __middle, _Sent __last, _Comp&& __comp, _Proj&& __proj) {
auto __last_iter = ranges::next(__middle, __last);
std::__inplace_merge<_RangeAlgPolicy>(
@ -51,7 +51,7 @@ struct __inplace_merge {
template <bidirectional_iterator _Iter, sentinel_for<_Iter> _Sent, class _Comp = ranges::less, class _Proj = identity>
requires sortable<_Iter, _Comp, _Proj>
_LIBCPP_HIDE_FROM_ABI _Iter
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Iter
operator()(_Iter __first, _Iter __middle, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const {
return __inplace_merge_impl(
std::move(__first), std::move(__middle), std::move(__last), std::move(__comp), std::move(__proj));
@ -59,7 +59,7 @@ struct __inplace_merge {
template <bidirectional_range _Range, class _Comp = ranges::less, class _Proj = identity>
requires sortable<iterator_t<_Range>, _Comp, _Proj>
_LIBCPP_HIDE_FROM_ABI borrowed_iterator_t<_Range>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 borrowed_iterator_t<_Range>
operator()(_Range&& __range, iterator_t<_Range> __middle, _Comp __comp = {}, _Proj __proj = {}) const {
return __inplace_merge_impl(
ranges::begin(__range), std::move(__middle), ranges::end(__range), std::move(__comp), std::move(__proj));

View File

@ -44,7 +44,7 @@ consteval auto __get_iterator_concept() {
}
template <class _Iter>
using __iterator_concept _LIBCPP_NODEBUG = decltype(__get_iterator_concept<_Iter>());
using __iterator_concept _LIBCPP_NODEBUG = decltype(ranges::__get_iterator_concept<_Iter>());
} // namespace ranges
_LIBCPP_END_NAMESPACE_STD

View File

@ -9,7 +9,7 @@
#ifndef _LIBCPP___ALGORITHM_RANGES_MAX_H
#define _LIBCPP___ALGORITHM_RANGES_MAX_H
#include <__algorithm/ranges_min_element.h>
#include <__algorithm/min_element.h>
#include <__assert>
#include <__concepts/copyable.h>
#include <__config>
@ -57,7 +57,7 @@ struct __max {
__il.begin() != __il.end(), "initializer_list must contain at least one element");
auto __comp_lhs_rhs_swapped = [&](auto&& __lhs, auto&& __rhs) -> bool { return std::invoke(__comp, __rhs, __lhs); };
return *ranges::__min_element_impl(__il.begin(), __il.end(), __comp_lhs_rhs_swapped, __proj);
return *std::__min_element(__il.begin(), __il.end(), __comp_lhs_rhs_swapped, __proj);
}
template <input_range _Rp,
@ -75,7 +75,7 @@ struct __max {
auto __comp_lhs_rhs_swapped = [&](auto&& __lhs, auto&& __rhs) -> bool {
return std::invoke(__comp, __rhs, __lhs);
};
return *ranges::__min_element_impl(std::move(__first), std::move(__last), __comp_lhs_rhs_swapped, __proj);
return *std::__min_element(std::move(__first), std::move(__last), __comp_lhs_rhs_swapped, __proj);
} else {
range_value_t<_Rp> __result = *__first;
while (++__first != __last) {

View File

@ -9,7 +9,7 @@
#ifndef _LIBCPP___ALGORITHM_RANGES_MAX_ELEMENT_H
#define _LIBCPP___ALGORITHM_RANGES_MAX_ELEMENT_H
#include <__algorithm/ranges_min_element.h>
#include <__algorithm/min_element.h>
#include <__config>
#include <__functional/identity.h>
#include <__functional/invoke.h>
@ -40,7 +40,7 @@ struct __max_element {
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip
operator()(_Ip __first, _Sp __last, _Comp __comp = {}, _Proj __proj = {}) const {
auto __comp_lhs_rhs_swapped = [&](auto&& __lhs, auto&& __rhs) -> bool { return std::invoke(__comp, __rhs, __lhs); };
return ranges::__min_element_impl(__first, __last, __comp_lhs_rhs_swapped, __proj);
return std::__min_element(__first, __last, __comp_lhs_rhs_swapped, __proj);
}
template <forward_range _Rp,
@ -49,7 +49,7 @@ struct __max_element {
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp>
operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const {
auto __comp_lhs_rhs_swapped = [&](auto&& __lhs, auto&& __rhs) -> bool { return std::invoke(__comp, __rhs, __lhs); };
return ranges::__min_element_impl(ranges::begin(__r), ranges::end(__r), __comp_lhs_rhs_swapped, __proj);
return std::__min_element(ranges::begin(__r), ranges::end(__r), __comp_lhs_rhs_swapped, __proj);
}
};

View File

@ -9,7 +9,7 @@
#ifndef _LIBCPP___ALGORITHM_RANGES_MIN_H
#define _LIBCPP___ALGORITHM_RANGES_MIN_H
#include <__algorithm/ranges_min_element.h>
#include <__algorithm/min_element.h>
#include <__assert>
#include <__concepts/copyable.h>
#include <__config>
@ -54,7 +54,7 @@ struct __min {
operator()(initializer_list<_Tp> __il, _Comp __comp = {}, _Proj __proj = {}) const {
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(
__il.begin() != __il.end(), "initializer_list must contain at least one element");
return *ranges::__min_element_impl(__il.begin(), __il.end(), __comp, __proj);
return *std::__min_element(__il.begin(), __il.end(), __comp, __proj);
}
template <input_range _Rp,
@ -67,7 +67,7 @@ struct __min {
auto __last = ranges::end(__r);
_LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__first != __last, "range must contain at least one element");
if constexpr (forward_range<_Rp> && !__is_cheap_to_copy<range_value_t<_Rp>>) {
return *ranges::__min_element_impl(__first, __last, __comp, __proj);
return *std::__min_element(__first, __last, __comp, __proj);
} else {
range_value_t<_Rp> __result = *__first;
while (++__first != __last) {

View File

@ -9,6 +9,7 @@
#ifndef _LIBCPP___ALGORITHM_RANGES_MIN_ELEMENT_H
#define _LIBCPP___ALGORITHM_RANGES_MIN_ELEMENT_H
#include <__algorithm/min_element.h>
#include <__config>
#include <__functional/identity.h>
#include <__functional/invoke.h>
@ -32,20 +33,6 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
// TODO(ranges): `ranges::min_element` can now simply delegate to `std::__min_element`.
template <class _Ip, class _Sp, class _Proj, class _Comp>
_LIBCPP_HIDE_FROM_ABI constexpr _Ip __min_element_impl(_Ip __first, _Sp __last, _Comp& __comp, _Proj& __proj) {
if (__first == __last)
return __first;
_Ip __i = __first;
while (++__i != __last)
if (std::invoke(__comp, std::invoke(__proj, *__i), std::invoke(__proj, *__first)))
__first = __i;
return __first;
}
struct __min_element {
template <forward_iterator _Ip,
sentinel_for<_Ip> _Sp,
@ -53,7 +40,7 @@ struct __min_element {
indirect_strict_weak_order<projected<_Ip, _Proj>> _Comp = ranges::less>
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip
operator()(_Ip __first, _Sp __last, _Comp __comp = {}, _Proj __proj = {}) const {
return ranges::__min_element_impl(__first, __last, __comp, __proj);
return std::__min_element(__first, __last, __comp, __proj);
}
template <forward_range _Rp,
@ -61,7 +48,7 @@ struct __min_element {
indirect_strict_weak_order<projected<iterator_t<_Rp>, _Proj>> _Comp = ranges::less>
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp>
operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const {
return ranges::__min_element_impl(ranges::begin(__r), ranges::end(__r), __comp, __proj);
return std::__min_element(ranges::begin(__r), ranges::end(__r), __comp, __proj);
}
};

View File

@ -44,7 +44,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
struct __stable_partition {
template <class _Iter, class _Sent, class _Proj, class _Pred>
_LIBCPP_HIDE_FROM_ABI static subrange<__remove_cvref_t<_Iter>>
_LIBCPP_HIDE_FROM_ABI static _LIBCPP_CONSTEXPR_SINCE_CXX26 subrange<__remove_cvref_t<_Iter>>
__stable_partition_fn_impl(_Iter&& __first, _Sent&& __last, _Pred&& __pred, _Proj&& __proj) {
auto __last_iter = ranges::next(__first, __last);
@ -60,7 +60,8 @@ struct __stable_partition {
class _Proj = identity,
indirect_unary_predicate<projected<_Iter, _Proj>> _Pred>
requires permutable<_Iter>
_LIBCPP_HIDE_FROM_ABI subrange<_Iter> operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) const {
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 subrange<_Iter>
operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) const {
return __stable_partition_fn_impl(__first, __last, __pred, __proj);
}
@ -68,7 +69,7 @@ struct __stable_partition {
class _Proj = identity,
indirect_unary_predicate<projected<iterator_t<_Range>, _Proj>> _Pred>
requires permutable<iterator_t<_Range>>
_LIBCPP_HIDE_FROM_ABI borrowed_subrange_t<_Range>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 borrowed_subrange_t<_Range>
operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const {
return __stable_partition_fn_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj);
}

View File

@ -41,7 +41,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
struct __stable_sort {
template <class _Iter, class _Sent, class _Comp, class _Proj>
_LIBCPP_HIDE_FROM_ABI static _Iter __stable_sort_fn_impl(_Iter __first, _Sent __last, _Comp& __comp, _Proj& __proj) {
_LIBCPP_HIDE_FROM_ABI static _LIBCPP_CONSTEXPR_SINCE_CXX26 _Iter
__stable_sort_fn_impl(_Iter __first, _Sent __last, _Comp& __comp, _Proj& __proj) {
auto __last_iter = ranges::next(__first, __last);
auto&& __projected_comp = std::__make_projected(__comp, __proj);
@ -52,13 +53,14 @@ struct __stable_sort {
template <random_access_iterator _Iter, sentinel_for<_Iter> _Sent, class _Comp = ranges::less, class _Proj = identity>
requires sortable<_Iter, _Comp, _Proj>
_LIBCPP_HIDE_FROM_ABI _Iter operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const {
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _Iter
operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const {
return __stable_sort_fn_impl(std::move(__first), std::move(__last), __comp, __proj);
}
template <random_access_range _Range, class _Comp = ranges::less, class _Proj = identity>
requires sortable<iterator_t<_Range>, _Comp, _Proj>
_LIBCPP_HIDE_FROM_ABI borrowed_iterator_t<_Range>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 borrowed_iterator_t<_Range>
operator()(_Range&& __r, _Comp __comp = {}, _Proj __proj = {}) const {
return __stable_sort_fn_impl(ranges::begin(__r), ranges::end(__r), __comp, __proj);
}

View File

@ -9,12 +9,19 @@
#ifndef _LIBCPP___ALGORITHM_ROTATE_H
#define _LIBCPP___ALGORITHM_ROTATE_H
#include <__algorithm/copy.h>
#include <__algorithm/copy_backward.h>
#include <__algorithm/iterator_operations.h>
#include <__algorithm/move.h>
#include <__algorithm/move_backward.h>
#include <__algorithm/swap_ranges.h>
#include <__config>
#include <__cstddef/size_t.h>
#include <__fwd/bit_reference.h>
#include <__iterator/iterator_traits.h>
#include <__memory/construct_at.h>
#include <__memory/pointer_traits.h>
#include <__type_traits/is_constant_evaluated.h>
#include <__type_traits/is_trivially_assignable.h>
#include <__utility/move.h>
#include <__utility/pair.h>
@ -185,6 +192,44 @@ __rotate(_Iterator __first, _Iterator __middle, _Sentinel __last) {
return _Ret(std::move(__result), std::move(__last_iter));
}
// Overload of __rotate for __bit_iterator (the packed-bit iterator used by
// vector<bool>/bitset-like containers). Rotates [__first, __last) so that
// __middle becomes the new first element. Returns the pair
// {__first + (__last - __middle), __last}: the new position of the element
// originally at __first, together with the end of the range.
template <class, class _Cp>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<__bit_iterator<_Cp, false>, __bit_iterator<_Cp, false> >
__rotate(__bit_iterator<_Cp, false> __first, __bit_iterator<_Cp, false> __middle, __bit_iterator<_Cp, false> __last) {
  using _I1             = __bit_iterator<_Cp, false>;
  using difference_type = typename _I1::difference_type;
  difference_type __d1 = __middle - __first; // length of the front chunk
  difference_type __d2 = __last - __middle;  // length of the back chunk
  _I1 __r = __first + __d2; // final position of the element originally at __first
  // Iteratively move the smaller chunk into place, shrinking the unrotated
  // region each pass. Once the smaller chunk fits into a temporary
  // __bit_array buffer, finish with a buffered copy and stop.
  while (__d1 != 0 && __d2 != 0) {
    if (__d1 <= __d2) {
      if (__d1 <= __bit_array<_Cp>::capacity()) {
        // Front chunk fits in the buffer: stash it, slide the back chunk
        // down to __first, then append the stashed bits. Done.
        __bit_array<_Cp> __b(__d1);
        std::copy(__first, __middle, __b.begin());
        std::copy(__b.begin(), __b.end(), std::copy(__middle, __last, __first));
        break;
      } else {
        // Swap the front chunk past __middle; the region still to rotate
        // shrinks by __d1.
        __bit_iterator<_Cp, false> __mp = std::swap_ranges(__first, __middle, __middle);
        __first  = __middle;
        __middle = __mp;
        __d2 -= __d1;
      }
    } else {
      if (__d2 <= __bit_array<_Cp>::capacity()) {
        // Back chunk fits in the buffer: stash it, slide the front chunk up
        // to __last, then write the stashed bits at the front. Done.
        __bit_array<_Cp> __b(__d2);
        std::copy(__middle, __last, __b.begin());
        std::copy_backward(__b.begin(), __b.end(), std::copy_backward(__first, __middle, __last));
        break;
      } else {
        // Swap the back chunk into place at __first; the region still to
        // rotate shrinks by __d2.
        __bit_iterator<_Cp, false> __mp = __first + __d2;
        std::swap_ranges(__first, __mp, __middle);
        __first = __mp;
        __d1 -= __d2;
      }
    }
  }
  return std::make_pair(__r, __last);
}
template <class _ForwardIterator>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator
rotate(_ForwardIterator __first, _ForwardIterator __middle, _ForwardIterator __last) {

View File

@ -15,8 +15,6 @@
#include <__bit/countr.h>
#include <__config>
#include <__cstddef/size_t.h>
#include <__type_traits/is_arithmetic.h>
#include <__type_traits/is_same.h>
#include <__utility/integer_sequence.h>
#include <cstdint>
@ -28,7 +26,9 @@ _LIBCPP_PUSH_MACROS
#include <__undef_macros>
// TODO: Find out how altivec changes things and allow vectorizations there too.
#if _LIBCPP_STD_VER >= 14 && defined(_LIBCPP_CLANG_VER) && !defined(__ALTIVEC__)
// TODO: Simplify this condition once we stop building with AppleClang 15 in the CI.
#if _LIBCPP_STD_VER >= 14 && defined(_LIBCPP_COMPILER_CLANG_BASED) && !defined(__ALTIVEC__) && \
!(defined(_LIBCPP_APPLE_CLANG_VER) && _LIBCPP_APPLE_CLANG_VER < 1600)
# define _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS 1
#else
# define _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS 0
@ -53,20 +53,20 @@ struct __get_as_integer_type_impl;
template <>
struct __get_as_integer_type_impl<1> {
using type = uint8_t;
using type _LIBCPP_NODEBUG = uint8_t;
};
template <>
struct __get_as_integer_type_impl<2> {
using type = uint16_t;
using type _LIBCPP_NODEBUG = uint16_t;
};
template <>
struct __get_as_integer_type_impl<4> {
using type = uint32_t;
using type _LIBCPP_NODEBUG = uint32_t;
};
template <>
struct __get_as_integer_type_impl<8> {
using type = uint64_t;
using type _LIBCPP_NODEBUG = uint64_t;
};
template <class _Tp>
@ -78,7 +78,7 @@ using __get_as_integer_type_t _LIBCPP_NODEBUG = typename __get_as_integer_type_i
# if defined(__AVX__) || defined(__MVS__)
template <class _Tp>
inline constexpr size_t __native_vector_size = 32 / sizeof(_Tp);
# elif defined(__SSE__) || defined(__ARM_NEON__)
# elif defined(__SSE__) || defined(__ARM_NEON)
template <class _Tp>
inline constexpr size_t __native_vector_size = 16 / sizeof(_Tp);
# elif defined(__MMX__)

View File

@ -17,6 +17,7 @@
#include <__algorithm/partial_sort.h>
#include <__algorithm/unwrap_iter.h>
#include <__assert>
#include <__bit/bit_log2.h>
#include <__bit/blsr.h>
#include <__bit/countl.h>
#include <__bit/countr.h>
@ -34,7 +35,7 @@
#include <__type_traits/is_constant_evaluated.h>
#include <__type_traits/is_same.h>
#include <__type_traits/is_trivially_copyable.h>
#include <__type_traits/remove_cvref.h>
#include <__type_traits/make_unsigned.h>
#include <__utility/move.h>
#include <__utility/pair.h>
#include <climits>
@ -52,8 +53,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _Compare, class _Iter, class _Tp = typename iterator_traits<_Iter>::value_type>
inline const bool __use_branchless_sort =
__libcpp_is_contiguous_iterator<_Iter>::value && __is_cheap_to_copy<_Tp> && is_arithmetic<_Tp>::value &&
(__desugars_to_v<__less_tag, __remove_cvref_t<_Compare>, _Tp, _Tp> ||
__desugars_to_v<__greater_tag, __remove_cvref_t<_Compare>, _Tp, _Tp>);
(__desugars_to_v<__less_tag, _Compare, _Tp, _Tp> || __desugars_to_v<__greater_tag, _Compare, _Tp, _Tp>);
namespace __detail {
@ -359,10 +359,10 @@ inline _LIBCPP_HIDE_FROM_ABI void __swap_bitmap_pos(
// Swap one pair on each iteration as long as both bitsets have at least one
// element for swapping.
while (__left_bitset != 0 && __right_bitset != 0) {
difference_type __tz_left = __libcpp_ctz(__left_bitset);
__left_bitset = __libcpp_blsr(__left_bitset);
difference_type __tz_right = __libcpp_ctz(__right_bitset);
__right_bitset = __libcpp_blsr(__right_bitset);
difference_type __tz_left = std::__countr_zero(__left_bitset);
__left_bitset = std::__libcpp_blsr(__left_bitset);
difference_type __tz_right = std::__countr_zero(__right_bitset);
__right_bitset = std::__libcpp_blsr(__right_bitset);
_Ops::iter_swap(__first + __tz_left, __last - __tz_right);
}
}
@ -458,7 +458,7 @@ inline _LIBCPP_HIDE_FROM_ABI void __swap_bitmap_pos_within(
// Swap within the left side. Need to find set positions in the reverse
// order.
while (__left_bitset != 0) {
difference_type __tz_left = __detail::__block_size - 1 - __libcpp_clz(__left_bitset);
difference_type __tz_left = __detail::__block_size - 1 - std::__countl_zero(__left_bitset);
__left_bitset &= (static_cast<uint64_t>(1) << __tz_left) - 1;
_RandomAccessIterator __it = __first + __tz_left;
if (__it != __lm1) {
@ -471,7 +471,7 @@ inline _LIBCPP_HIDE_FROM_ABI void __swap_bitmap_pos_within(
// Swap within the right side. Need to find set positions in the reverse
// order.
while (__right_bitset != 0) {
difference_type __tz_right = __detail::__block_size - 1 - __libcpp_clz(__right_bitset);
difference_type __tz_right = __detail::__block_size - 1 - std::__countl_zero(__right_bitset);
__right_bitset &= (static_cast<uint64_t>(1) << __tz_right) - 1;
_RandomAccessIterator __it = __lm1 - __tz_right;
if (__it != __first) {
@ -828,25 +828,6 @@ void __introsort(_RandomAccessIterator __first,
}
}
// Computes floor(log2(__n)) for __n > 0; returns 0 when __n == 0.
// When the value fits in one of the built-in unsigned types, the result is
// derived from a count-leading-zeros intrinsic; otherwise a shift loop is
// used as a portable fallback for wider types.
template <typename _Number>
inline _LIBCPP_HIDE_FROM_ABI _Number __log2i(_Number __n) {
  if (__n == 0)
    return 0;
  // Prefer the intrinsic-backed paths: bit-width minus one minus the number
  // of leading zero bits equals the index of the highest set bit.
  if (sizeof(__n) <= sizeof(unsigned))
    return sizeof(unsigned) * CHAR_BIT - 1 - __libcpp_clz(static_cast<unsigned>(__n));
  if (sizeof(__n) <= sizeof(unsigned long))
    return sizeof(unsigned long) * CHAR_BIT - 1 - __libcpp_clz(static_cast<unsigned long>(__n));
  if (sizeof(__n) <= sizeof(unsigned long long))
    return sizeof(unsigned long long) * CHAR_BIT - 1 - __libcpp_clz(static_cast<unsigned long long>(__n));
  // Fallback: count how many times the value can be halved before reaching 1.
  _Number __result = 0;
  for (; __n > 1; __n >>= 1)
    ++__result;
  return __result;
}
template <class _Comp, class _RandomAccessIterator>
void __sort(_RandomAccessIterator, _RandomAccessIterator, _Comp);
@ -880,7 +861,7 @@ template <class _AlgPolicy, class _RandomAccessIterator, class _Comp>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void
__sort_dispatch(_RandomAccessIterator __first, _RandomAccessIterator __last, _Comp& __comp) {
typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type;
difference_type __depth_limit = 2 * std::__log2i(__last - __first);
difference_type __depth_limit = 2 * std::__bit_log2(std::__to_unsigned_like(__last - __first));
// Only use bitset partitioning for arithmetic types. We should also check
// that the default comparator is in use so that we are sure that there are no

View File

@ -16,6 +16,7 @@
#include <__iterator/advance.h>
#include <__iterator/distance.h>
#include <__iterator/iterator_traits.h>
#include <__memory/construct_at.h>
#include <__memory/destruct_n.h>
#include <__memory/unique_ptr.h>
#include <__memory/unique_temporary_buffer.h>
@ -33,7 +34,7 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
template <class _AlgPolicy, class _Predicate, class _ForwardIterator, class _Distance, class _Pair>
_LIBCPP_HIDE_FROM_ABI _ForwardIterator __stable_partition_impl(
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _ForwardIterator __stable_partition_impl(
_ForwardIterator __first,
_ForwardIterator __last,
_Predicate __pred,
@ -61,7 +62,7 @@ _LIBCPP_HIDE_FROM_ABI _ForwardIterator __stable_partition_impl(
// Move the falses into the temporary buffer, and the trues to the front of the line
// Update __first to always point to the end of the trues
value_type* __t = __p.first;
::new ((void*)__t) value_type(_Ops::__iter_move(__first));
std::__construct_at(__t, _Ops::__iter_move(__first));
__d.template __incr<value_type>();
++__t;
_ForwardIterator __i = __first;
@ -70,7 +71,7 @@ _LIBCPP_HIDE_FROM_ABI _ForwardIterator __stable_partition_impl(
*__first = _Ops::__iter_move(__i);
++__first;
} else {
::new ((void*)__t) value_type(_Ops::__iter_move(__i));
std::__construct_at(__t, _Ops::__iter_move(__i));
__d.template __incr<value_type>();
++__t;
}
@ -116,7 +117,7 @@ __second_half_done:
}
template <class _AlgPolicy, class _Predicate, class _ForwardIterator>
_LIBCPP_HIDE_FROM_ABI _ForwardIterator
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _ForwardIterator
__stable_partition_impl(_ForwardIterator __first, _ForwardIterator __last, _Predicate __pred, forward_iterator_tag) {
typedef typename iterator_traits<_ForwardIterator>::difference_type difference_type;
typedef typename iterator_traits<_ForwardIterator>::value_type value_type;
@ -145,7 +146,7 @@ __stable_partition_impl(_ForwardIterator __first, _ForwardIterator __last, _Pred
}
template <class _AlgPolicy, class _Predicate, class _BidirectionalIterator, class _Distance, class _Pair>
_BidirectionalIterator __stable_partition_impl(
_LIBCPP_CONSTEXPR_SINCE_CXX26 _BidirectionalIterator __stable_partition_impl(
_BidirectionalIterator __first,
_BidirectionalIterator __last,
_Predicate __pred,
@ -179,7 +180,7 @@ _BidirectionalIterator __stable_partition_impl(
// Move the falses into the temporary buffer, and the trues to the front of the line
// Update __first to always point to the end of the trues
value_type* __t = __p.first;
::new ((void*)__t) value_type(_Ops::__iter_move(__first));
std::__construct_at(__t, _Ops::__iter_move(__first));
__d.template __incr<value_type>();
++__t;
_BidirectionalIterator __i = __first;
@ -188,7 +189,7 @@ _BidirectionalIterator __stable_partition_impl(
*__first = _Ops::__iter_move(__i);
++__first;
} else {
::new ((void*)__t) value_type(_Ops::__iter_move(__i));
std::__construct_at(__t, _Ops::__iter_move(__i));
__d.template __incr<value_type>();
++__t;
}
@ -247,7 +248,7 @@ __second_half_done:
}
template <class _AlgPolicy, class _Predicate, class _BidirectionalIterator>
_LIBCPP_HIDE_FROM_ABI _BidirectionalIterator __stable_partition_impl(
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _BidirectionalIterator __stable_partition_impl(
_BidirectionalIterator __first, _BidirectionalIterator __last, _Predicate __pred, bidirectional_iterator_tag) {
typedef typename iterator_traits<_BidirectionalIterator>::difference_type difference_type;
typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type;
@ -283,14 +284,14 @@ _LIBCPP_HIDE_FROM_ABI _BidirectionalIterator __stable_partition_impl(
}
template <class _AlgPolicy, class _Predicate, class _ForwardIterator, class _IterCategory>
_LIBCPP_HIDE_FROM_ABI _ForwardIterator __stable_partition(
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX26 _ForwardIterator __stable_partition(
_ForwardIterator __first, _ForwardIterator __last, _Predicate&& __pred, _IterCategory __iter_category) {
return std::__stable_partition_impl<_AlgPolicy, __remove_cvref_t<_Predicate>&>(
std::move(__first), std::move(__last), __pred, __iter_category);
}
template <class _ForwardIterator, class _Predicate>
inline _LIBCPP_HIDE_FROM_ABI _ForwardIterator
_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR_SINCE_CXX26 _ForwardIterator
stable_partition(_ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
using _IterCategory = typename iterator_traits<_ForwardIterator>::iterator_category;
return std::__stable_partition<_ClassicAlgPolicy, _Predicate&>(

View File

@ -25,10 +25,9 @@
#include <__memory/unique_temporary_buffer.h>
#include <__type_traits/desugars_to.h>
#include <__type_traits/enable_if.h>
#include <__type_traits/is_integral.h>
#include <__type_traits/is_constant_evaluated.h>
#include <__type_traits/is_same.h>
#include <__type_traits/is_trivially_assignable.h>
#include <__type_traits/remove_cvref.h>
#include <__utility/move.h>
#include <__utility/pair.h>
@ -201,7 +200,7 @@ struct __stable_sort_switch {
#if _LIBCPP_STD_VER >= 17
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI constexpr unsigned __radix_sort_min_bound() {
static_assert(is_integral<_Tp>::value);
static_assert(__is_ordered_integer_representable_v<_Tp>);
if constexpr (sizeof(_Tp) == 1) {
return 1 << 8;
}
@ -211,7 +210,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr unsigned __radix_sort_min_bound() {
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI constexpr unsigned __radix_sort_max_bound() {
static_assert(is_integral<_Tp>::value);
static_assert(__is_ordered_integer_representable_v<_Tp>);
if constexpr (sizeof(_Tp) >= 8) {
return 1 << 15;
}
@ -245,14 +244,19 @@ _LIBCPP_CONSTEXPR_SINCE_CXX26 void __stable_sort(
}
#if _LIBCPP_STD_VER >= 17
constexpr auto __default_comp =
__desugars_to_v<__totally_ordered_less_tag, __remove_cvref_t<_Compare>, value_type, value_type >;
constexpr auto __integral_value =
is_integral_v<value_type > && is_same_v< value_type&, __iter_reference<_RandomAccessIterator>>;
constexpr auto __allowed_radix_sort = __default_comp && __integral_value;
if constexpr (__allowed_radix_sort) {
if (__len <= __buff_size && __len >= static_cast<difference_type>(__radix_sort_min_bound<value_type>()) &&
__len <= static_cast<difference_type>(__radix_sort_max_bound<value_type>())) {
constexpr auto __default_comp = __desugars_to_v<__less_tag, _Compare, value_type, value_type >;
constexpr auto __radix_sortable =
__is_ordered_integer_representable_v<value_type> &&
is_same_v< value_type&, __iter_reference<_RandomAccessIterator>>;
if constexpr (__default_comp && __radix_sortable) {
if (__len <= __buff_size && __len >= static_cast<difference_type>(std::__radix_sort_min_bound<value_type>()) &&
__len <= static_cast<difference_type>(std::__radix_sort_max_bound<value_type>())) {
if (__libcpp_is_constant_evaluated()) {
for (auto* __p = __buff; __p < __buff + __buff_size; ++__p) {
std::__construct_at(__p);
}
}
std::__radix_sort(__first, __last, __buff);
return;
}

View File

@ -10,9 +10,12 @@
#define _LIBCPP___ALGORITHM_SWAP_RANGES_H
#include <__algorithm/iterator_operations.h>
#include <__algorithm/min.h>
#include <__config>
#include <__fwd/bit_reference.h>
#include <__utility/move.h>
#include <__utility/pair.h>
#include <__utility/swap.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
@ -23,6 +26,165 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
// Swaps the bits of [__first, __last) with the bits beginning at __result,
// for the case where both ranges start at the same bit offset within their
// storage words (the __swap_ranges dispatcher below checks
// __first1.__ctz_ == __first2.__ctz_ before calling this). Returns the end
// of the destination range.
template <class _Cl, class _Cr>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __bit_iterator<_Cr, false> __swap_ranges_aligned(
    __bit_iterator<_Cl, false> __first, __bit_iterator<_Cl, false> __last, __bit_iterator<_Cr, false> __result) {
  using _I1             = __bit_iterator<_Cl, false>;
  using difference_type = typename _I1::difference_type;
  using __storage_type  = typename _I1::__storage_type;
  const int __bits_per_word = _I1::__bits_per_word;
  difference_type __n = __last - __first;
  if (__n > 0) {
    // do first word: swap the partial leading word under a mask so that the
    // remainder of the source range is word-aligned.
    if (__first.__ctz_ != 0) {
      unsigned __clz       = __bits_per_word - __first.__ctz_;
      difference_type __dn = std::min(static_cast<difference_type>(__clz), __n);
      __n -= __dn;
      // __m selects exactly the __dn bits being exchanged in this word.
      __storage_type __m  = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz - __dn));
      __storage_type __b1 = *__first.__seg_ & __m;
      *__first.__seg_ &= ~__m;
      __storage_type __b2 = *__result.__seg_ & __m;
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b1;
      *__first.__seg_ |= __b2;
      __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word;
      __result.__ctz_ = static_cast<unsigned>((__dn + __result.__ctz_) % __bits_per_word);
      ++__first.__seg_;
      // __first.__ctz_ = 0;
    }
    // __first.__ctz_ == 0;
    // do middle words: both sides are now word-aligned, so whole storage
    // words can be exchanged directly.
    for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_, ++__result.__seg_)
      swap(*__first.__seg_, *__result.__seg_);
    // do last word: exchange the remaining __n low bits under a mask.
    if (__n > 0) {
      __storage_type __m  = ~__storage_type(0) >> (__bits_per_word - __n);
      __storage_type __b1 = *__first.__seg_ & __m;
      *__first.__seg_ &= ~__m;
      __storage_type __b2 = *__result.__seg_ & __m;
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b1;
      *__first.__seg_ |= __b2;
      __result.__ctz_ = static_cast<unsigned>(__n);
    }
  }
  return __result;
}
// Swaps the bits of [__first, __last) with the bits beginning at __result,
// for the case where the two ranges start at different bit offsets within
// their storage words. Every transferred chunk must be shifted by the offset
// difference, and a source word may straddle two destination words (and vice
// versa). Returns the end of the destination range.
template <class _Cl, class _Cr>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __bit_iterator<_Cr, false> __swap_ranges_unaligned(
    __bit_iterator<_Cl, false> __first, __bit_iterator<_Cl, false> __last, __bit_iterator<_Cr, false> __result) {
  using _I1             = __bit_iterator<_Cl, false>;
  using difference_type = typename _I1::difference_type;
  using __storage_type  = typename _I1::__storage_type;
  const int __bits_per_word = _I1::__bits_per_word;
  difference_type __n = __last - __first;
  if (__n > 0) {
    // do first word: bring the source to a word boundary. The __dn source
    // bits may span up to two destination words (__ddn in the first, the
    // remaining __dn - __ddn in the next).
    if (__first.__ctz_ != 0) {
      unsigned __clz_f     = __bits_per_word - __first.__ctz_;
      difference_type __dn = std::min(static_cast<difference_type>(__clz_f), __n);
      __n -= __dn;
      __storage_type __m  = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn));
      __storage_type __b1 = *__first.__seg_ & __m;
      *__first.__seg_ &= ~__m;
      unsigned __clz_r     = __bits_per_word - __result.__ctz_;
      __storage_type __ddn = std::min<__storage_type>(__dn, __clz_r);
      __m                  = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn));
      __storage_type __b2  = *__result.__seg_ & __m;
      *__result.__seg_ &= ~__m;
      // Shift direction depends on which side starts deeper into its word.
      if (__result.__ctz_ > __first.__ctz_) {
        unsigned __s = __result.__ctz_ - __first.__ctz_;
        *__result.__seg_ |= __b1 << __s;
        *__first.__seg_ |= __b2 >> __s;
      } else {
        unsigned __s = __first.__ctz_ - __result.__ctz_;
        *__result.__seg_ |= __b1 >> __s;
        *__first.__seg_ |= __b2 << __s;
      }
      __result.__seg_ += (__ddn + __result.__ctz_) / __bits_per_word;
      __result.__ctz_ = static_cast<unsigned>((__ddn + __result.__ctz_) % __bits_per_word);
      __dn -= __ddn;
      // Spill-over: exchange the remaining __dn bits with the start of the
      // next destination word.
      if (__dn > 0) {
        __m  = ~__storage_type(0) >> (__bits_per_word - __dn);
        __b2 = *__result.__seg_ & __m;
        *__result.__seg_ &= ~__m;
        unsigned __s = __first.__ctz_ + __ddn;
        *__result.__seg_ |= __b1 >> __s;
        *__first.__seg_ |= __b2 << __s;
        __result.__ctz_ = static_cast<unsigned>(__dn);
      }
      ++__first.__seg_;
      // __first.__ctz_ = 0;
    }
    // __first.__ctz_ == 0;
    // do middle words: each full source word is exchanged with the high part
    // of the current destination word and the low part of the next one.
    __storage_type __m = ~__storage_type(0) << __result.__ctz_;
    unsigned __clz_r   = __bits_per_word - __result.__ctz_;
    for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_) {
      __storage_type __b1 = *__first.__seg_;
      __storage_type __b2 = *__result.__seg_ & __m;
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b1 << __result.__ctz_;
      *__first.__seg_ = __b2 >> __result.__ctz_;
      ++__result.__seg_;
      __b2 = *__result.__seg_ & ~__m;
      *__result.__seg_ &= __m;
      *__result.__seg_ |= __b1 >> __clz_r;
      *__first.__seg_ |= __b2 << __clz_r;
    }
    // do last word: exchange the final __n bits, which again may straddle
    // two destination words.
    if (__n > 0) {
      __m                 = ~__storage_type(0) >> (__bits_per_word - __n);
      __storage_type __b1 = *__first.__seg_ & __m;
      *__first.__seg_ &= ~__m;
      __storage_type __dn = std::min<__storage_type>(__n, __clz_r);
      __m                 = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn));
      __storage_type __b2 = *__result.__seg_ & __m;
      *__result.__seg_ &= ~__m;
      *__result.__seg_ |= __b1 << __result.__ctz_;
      *__first.__seg_ |= __b2 >> __result.__ctz_;
      __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word;
      __result.__ctz_ = static_cast<unsigned>((__dn + __result.__ctz_) % __bits_per_word);
      __n -= __dn;
      if (__n > 0) {
        __m  = ~__storage_type(0) >> (__bits_per_word - __n);
        __b2 = *__result.__seg_ & __m;
        *__result.__seg_ &= ~__m;
        *__result.__seg_ |= __b1 >> __dn;
        *__first.__seg_ |= __b2 << __dn;
        __result.__ctz_ = static_cast<unsigned>(__n);
      }
    }
  }
  return __result;
}
// 2+1 iterators: size2 >= size1; used by std::swap_ranges.
// Returns {__last1, end of the second range actually written}.
template <class, class _Cl, class _Cr>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<__bit_iterator<_Cl, false>, __bit_iterator<_Cr, false> >
__swap_ranges(__bit_iterator<_Cl, false> __first1,
              __bit_iterator<_Cl, false> __last1,
              __bit_iterator<_Cr, false> __first2) {
  // Dispatch on whether both ranges begin at the same bit offset within
  // their storage words; the aligned case permits whole-word swaps.
  const bool __aligned = __first1.__ctz_ == __first2.__ctz_;
  __bit_iterator<_Cr, false> __end2 =
      __aligned ? std::__swap_ranges_aligned(__first1, __last1, __first2)
                : std::__swap_ranges_unaligned(__first1, __last1, __first2);
  return std::make_pair(__last1, __end2);
}
// 2+2 iterators: used by std::ranges::swap_ranges.
// Swaps only as many bits as the shorter range holds, delegating to the
// 2+1 overload with the shorter range passed first. Returns the ends of the
// regions actually exchanged in each range.
template <class _AlgPolicy, class _Cl, class _Cr>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<__bit_iterator<_Cl, false>, __bit_iterator<_Cr, false> >
__swap_ranges(__bit_iterator<_Cl, false> __first1,
              __bit_iterator<_Cl, false> __last1,
              __bit_iterator<_Cr, false> __first2,
              __bit_iterator<_Cr, false> __last2) {
  if (__last2 - __first2 > __last1 - __first1) {
    // First range is strictly shorter: it is consumed entirely.
    auto __end2 = std::__swap_ranges<_AlgPolicy>(__first1, __last1, __first2).second;
    return std::make_pair(__last1, __end2);
  }
  // Second range is no longer than the first: it is consumed entirely.
  auto __end1 = std::__swap_ranges<_AlgPolicy>(__first2, __last2, __first1).second;
  return std::make_pair(__end1, __last2);
}
// 2+2 iterators: the shorter size will be used.
template <class _AlgPolicy, class _ForwardIterator1, class _Sentinel1, class _ForwardIterator2, class _Sentinel2>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_ForwardIterator1, _ForwardIterator2>

Some files were not shown because too many files have changed in this diff Show More